diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 755a66a5c..4836ee4f5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -61,6 +61,6 @@ any new packages are added to `vendor.yml` prior to patching. ## Integration Tests Before releasing a new version, in addition to the standard (isolated) tests -we smoke test the key features against a running Daemon and Broker. +we smoke test the key features against the latest code and Broker. -Run `make pact` to start the daemon and run integration tests. +Run `make pact` to run the integration tests. diff --git a/client/mock_service.go b/client/mock_service.go index a5490aec7..6b7ae0f18 100644 --- a/client/mock_service.go +++ b/client/mock_service.go @@ -2,9 +2,6 @@ package client import ( "fmt" - "path/filepath" - - "github.com/kardianos/osext" ) // MockService is a wrapper for the Pact Mock Service. @@ -24,6 +21,5 @@ func (m *MockService) NewService(args []string) Service { } func getMockServiceCommandPath() string { - dir, _ := osext.ExecutableFolder() - return fmt.Sprintf(filepath.Join(dir, "pact", "bin", "pact-mock-service")) + return fmt.Sprintf("pact-mock-service") } diff --git a/client/service.go b/client/service.go index 8939fc460..0a8b87e8d 100644 --- a/client/service.go +++ b/client/service.go @@ -1,7 +1,16 @@ +/* +Package client implements the raw interface to the Pact CLI tools: The Pact Mock Service and Provider Verification +"binaries." + +See https://github.com/pact-foundation/pact-provider-verifier and +https://github.com/bethesque/pact-mock_service for more on the Ruby "binaries". + +NOTE: The ultimate goal here is to replace the Ruby dependencies with a shared +library (Pact Reference - (https://github.com/pact-foundation/pact-reference/). +*/ package client import ( - "io" "os/exec" ) @@ -13,6 +22,5 @@ type Service interface { List() map[int]*exec.Cmd Command() *exec.Cmd Start() *exec.Cmd - Run(io.Writer) (*exec.Cmd, error) NewService(args []string) Service } diff --git a/client/service_manager.go b/client/service_manager.go index 921cb11bc..f3ddcdcb3 100644 --- a/client/service_manager.go +++ b/client/service_manager.go @@ -2,11 +2,9 @@ package client import ( "bufio" - "io" "log" "os" "os/exec" - "strings" "time" ) @@ -100,19 +98,6 @@ func (s *ServiceManager) List() map[int]*exec.Cmd { return s.processes } -// Run runs a service synchronously and log its output to the given Pipe. -func (s *ServiceManager) Run(w io.Writer) (*exec.Cmd, error) { - log.Println("[DEBUG] starting service") - log.Printf("[DEBUG] %s %s\n", s.Cmd, strings.Join(s.Args, " ")) - cmd := exec.Command(s.Cmd, s.Args...) - cmd.Env = s.Env - cmd.Stdout = w - cmd.Stderr = w - err := cmd.Run() - - return cmd, err -} - func (s *ServiceManager) Command() *exec.Cmd { cmd := exec.Command(s.Cmd, s.Args...) cmd.Env = s.Env diff --git a/client/verification_service.go b/client/verification_service.go index de52e289a..673c2cbef 100644 --- a/client/verification_service.go +++ b/client/verification_service.go @@ -3,9 +3,6 @@ package client import ( "fmt" "log" - "path/filepath" - - "github.com/kardianos/osext" ) // VerificationService is a wrapper for the Pact Provider Verifier Service. 
@@ -35,6 +32,5 @@ func (v *VerificationService) NewService(args []string) Service { } func getVerifierCommandPath() string { - dir, _ := osext.ExecutableFolder() - return fmt.Sprintf(filepath.Join(dir, "pact", "bin", "pact-provider-verifier")) + return fmt.Sprintf("pact-provider-verifier") } diff --git a/command/install.go b/command/install.go index 63f6de887..1157af7ce 100644 --- a/command/install.go +++ b/command/install.go @@ -22,6 +22,6 @@ var installCmd = &cobra.Command{ } func init() { - installCmd.Flags().StringVarP(&path, "path", "p", "/opt/pact", "Local daemon port to listen on") + installCmd.Flags().StringVarP(&path, "path", "p", "/opt/pact", "Location to install the Pact CLI tools") RootCmd.AddCommand(installCmd) } diff --git a/command/root.go b/command/root.go index e3d2d3cb6..87b58e864 100644 --- a/command/root.go +++ b/command/root.go @@ -1,4 +1,4 @@ -// Package command contains the basic CLI commands to run Pact Go as a daemon. +// Package command contains the basic CLI commands to run Pact Go. package command import ( diff --git a/doc.go b/doc.go index b23601bcc..dfede9320 100644 --- a/doc.go +++ b/doc.go @@ -18,10 +18,8 @@ A typical consumer-side test would look something like this: func TestLogin(t *testing.T) { - // Create Pact, connecting to local Daemon - // Ensure the port matches the daemon port! + // Create Pact client pact := Pact{ - Port: 6666, Consumer: "My Consumer", Provider: "My Provider", } @@ -123,11 +121,8 @@ Provider side Pact testing, involves verifying that the contract - the Pact file A typical Provider side test would like something like: func TestProvider_PactContract(t *testing.T) { - // Create Pact, connecting to local Daemon - // Ensure the port matches the daemon port! - pact := Pact{ - Port: 6666, - } + // Create Pact + pact := Pact{} go startMyAPI("http://localhost:8000") pact.VerifyProvider(types.VerifyRequest{ diff --git a/dsl/client.go b/dsl/client.go index 35116554c..01db007c7 100644 --- a/dsl/client.go +++ b/dsl/client.go @@ -1,16 +1,19 @@ package dsl import ( + "bytes" + "encoding/json" "fmt" + "io/ioutil" "log" "net" - "net/rpc" "net/url" "regexp" "strconv" "strings" "time" + "github.com/pact-foundation/pact-go/client" "github.com/pact-foundation/pact-go/types" ) @@ -18,51 +21,149 @@ var ( timeoutDuration = 10 * time.Second ) -// PactClient is the default implementation of the Client interface. +// PactClient is the main interface into starting/stopping +// the underlying Pact CLI subsystem type PactClient struct { - // Address the Daemon is listening on - Address string + pactMockSvcManager client.Service + verificationSvcManager client.Service // Track mock servers Servers []MockService - // Track stub servers - // Stubs []StubService + // Network Daemon is listening on + Network string + + // Address the Daemon is listening on + Address string } -// StartServer starts a remote Pact Mock Server. -func (p *PactClient) StartServer(args []string, port int) *MockService { - log.Println("[DEBUG] client: starting a server") +// NewClient creates a new Pact client manager +func NewClient(MockServiceManager client.Service, verificationServiceManager client.Service) *PactClient { + MockServiceManager.Setup() + verificationServiceManager.Setup() - return nil + return &PactClient{ + pactMockSvcManager: MockServiceManager, + verificationSvcManager: verificationServiceManager, + } +} + +// StartServer starts a remote Pact Mock Server. 
+func (p *PactClient) StartServer(args []string, port int) *types.MockServer { + log.Println("[DEBUG] client: starting a server with args:", args, "port:", port) + args = append(args, []string{"--port", strconv.Itoa(port)}...) + svc := p.pactMockSvcManager.NewService(args) + cmd := svc.Start() + + waitForPort(port, p.getNetworkInterface(), p.Address, fmt.Sprintf(`Timed out waiting for Mock Server to + start on port %d - are you sure it's running?`, port)) + + return &types.MockServer{ + Pid: cmd.Process.Pid, + Port: port, + } } -// ListServers list all available Mock Servers -func (p *PactClient) ListServers(args []string, port int) []*types.MockServer { +// ListServers lists all known Mock Servers +func (p *PactClient) ListServers() []*types.MockServer { log.Println("[DEBUG] client: starting a server") - return nil + var servers []*types.MockServer + + for port, s := range p.pactMockSvcManager.List() { + servers = append(servers, &types.MockServer{ + Pid: s.Process.Pid, + Port: port, + }) + } + + return servers } // StopServer stops a remote Pact Mock Server. -func (p *PactClient) StopServer(server *types.MockServer) *types.MockServer { +func (p *PactClient) StopServer(server *types.MockServer) (*types.MockServer, error) { log.Println("[DEBUG] client: stop server") - return nil + // TODO: Need to be able to get a non-zero exit code here! + _, server.Error = p.pactMockSvcManager.Stop(server.Pid) + return server, server.Error } // RemoveAllServers stops all remote Pact Mock Servers. func (p *PactClient) RemoveAllServers(server *types.MockServer) *[]types.MockServer { log.Println("[DEBUG] client: stop server") + for _, s := range p.pactMockSvcManager.List() { + if s != nil { + p.pactMockSvcManager.Stop(s.Process.Pid) + } + } return nil } // VerifyProvider runs the verification process against a running Provider. func (p *PactClient) VerifyProvider(request types.VerifyRequest) (types.ProviderVerifierResponse, error) { log.Println("[DEBUG] client: verifying a provider") + var response types.ProviderVerifierResponse + + // Convert request into flags, and validate request + err := request.Validate() + if err != nil { + return response, err + } + + port := getPort(request.ProviderBaseURL) + + waitForPort(port, p.getNetworkInterface(), p.Address, fmt.Sprintf(`Timed out waiting for Provider API to start + on port %d - are you sure it's running?`, port)) + + // Run command, splitting out stderr and stdout. The command can fail for + // several reasons: + // 1. Command is unable to run at all. + // 2. Command runs, but fails for unknown reason. + // 3. Command runs, and returns exit status 1 because the tests fail. + // + // First, attempt to decode the response of the stdout. + // If that is successful, we are at case 3. Return stdout as message, no error. + // Else, return an error, include stderr and stdout in both the error and message.
+ svc := p.verificationSvcManager.NewService(request.Args) + cmd := svc.Command() - return types.ProviderVerifierResponse{}, nil + stdOutPipe, err := cmd.StdoutPipe() + if err != nil { + return response, err + } + stdErrPipe, err := cmd.StderrPipe() + if err != nil { + return response, err + } + err = cmd.Start() + if err != nil { + return response, err + } + stdOut, err := ioutil.ReadAll(stdOutPipe) + if err != nil { + return response, err + } + stdErr, err := ioutil.ReadAll(stdErrPipe) + if err != nil { + return response, err + } + + err = cmd.Wait() + + decoder := json.NewDecoder(bytes.NewReader(stdOut)) + + dErr := decoder.Decode(&response) + if dErr == nil { + return response, err + } + + if err == nil { + err = dErr + } + + return response, fmt.Errorf("error verifying provider: %s\n\nSTDERR:\n%s\n\nSTDOUT:\n%s", err, stdErr, stdOut) } // Get a port given a URL @@ -84,17 +185,7 @@ func getPort(rawURL string) int { return -1 } -func getHTTPClient(port int, network string, address string) (*rpc.Client, error) { - log.Println("[DEBUG] creating an HTTP client") - err := waitForPort(port, network, address, fmt.Sprintf(`Timed out waiting for Daemon on port %d - are you - sure it's running?`, port)) - if err != nil { - return nil, err - } - return rpc.DialHTTP(network, fmt.Sprintf("%s:%d", address, port)) -} - -// Use this to wait for a daemon to be running prior +// Use this to wait for a port to be running prior // to running tests. var waitForPort = func(port int, network string, address string, message string) error { log.Println("[DEBUG] waiting for port", port, "to become available") @@ -130,3 +221,12 @@ func sanitiseRubyResponse(response string) string { return s } + +// getNetworkInterface returns a default interface to communicate to the Daemon +// if none specified +func (p *PactClient) getNetworkInterface() string { + if p.Network == "" { + return "tcp" + } + return p.Network +} diff --git a/dsl/client_test.go b/dsl/client_test.go index 36e96d06c..cbbbbfe32 100644 --- a/dsl/client_test.go +++ b/dsl/client_test.go @@ -4,7 +4,6 @@ import ( "fmt" "log" "net" - "net/rpc" "os" "os/exec" "reflect" @@ -12,224 +11,67 @@ import ( "testing" "time" - "github.com/pact-foundation/pact-go/daemon" "github.com/pact-foundation/pact-go/types" "github.com/pact-foundation/pact-go/utils" ) -// Use this to wait for a daemon to be running prior -// to running tests -func waitForPortInTest(port int, t *testing.T) { - timeout := time.After(1 * time.Second) - for { - select { - case <-timeout: - t.Fatalf("Expected server to start < 1s.") - case <-time.After(50 * time.Millisecond): - _, err := net.Dial("tcp", fmt.Sprintf(":%d", port)) - if err == nil { - return - } - } - } -} - -// Use this to wait for a daemon to stop after running a test. 
-func waitForDaemonToShutdown(port int, t *testing.T) { - req := "" - res := "" - - waitForPortInTest(port, t) - - t.Log("Sending remote shutdown signal...\n") - client, err := rpc.DialHTTP("tcp", fmt.Sprintf(":%d", port)) - - err = client.Call("Daemon.StopDaemon", &req, &res) - if err != nil { - log.Fatal("rpc error:", err) - } - - t.Logf("Waiting for deamon to shutdown before next test") - timeout := time.After(1 * time.Second) - for { - select { - case <-timeout: - t.Fatalf("Expected server to shutdown < 1s.") - case <-time.After(50 * time.Millisecond): - conn, err := net.Dial("tcp", fmt.Sprintf(":%d", port)) - conn.SetReadDeadline(time.Now()) - defer conn.Close() - if err != nil { - return - } - buffer := make([]byte, 8) - _, err = conn.Read(buffer) - if err != nil { - return - } - } - } -} - -// This guy mocks out the underlying Service provider in the Daemon, -// but executes actual Daemon code. This means we don't spin up the real -// mock service but execute our code in isolation. -// -// Stubbing the exec.Cmd interface is hard, see fakeExec* functions for -// the magic. -func createDaemon(port int, success bool) (*daemon.Daemon, *daemon.ServiceMock) { - execFunc := fakeExecSuccessCommand - if !success { - execFunc = fakeExecFailCommand - } - svc := &daemon.ServiceMock{ - Cmd: "test", - Args: []string{}, - ServiceStopResult: true, - ServiceStopError: nil, - ExecFunc: execFunc, - ServiceList: map[int]*exec.Cmd{ - 1: fakeExecCommand("", success, ""), - 2: fakeExecCommand("", success, ""), - 3: fakeExecCommand("", success, ""), - }, - ServiceStartCmd: nil, - } - - // Start all processes to get the Pids! - for _, s := range svc.ServiceList { - s.Start() - } - - // Cleanup all Processes when we finish - defer func() { - for _, s := range svc.ServiceList { - s.Process.Kill() - } - }() - - d := daemon.NewDaemon(svc, svc) - go d.StartDaemon(port, "tcp", "") - return d, svc -} - func TestClient_List(t *testing.T) { - port, _ := utils.GetFreePort() - createDaemon(port, true) - waitForPortInTest(port, t) - defer waitForDaemonToShutdown(port, t) - client := &PactClient{Port: port} - - s := client.ListServers() - - if len(s.Servers) != 3 { - t.Fatalf("Expected 3 server to be running, got %d", len(s.Servers)) - } -} - -func TestClient_ListFail(t *testing.T) { - timeoutDuration = 50 * time.Millisecond - client := &PactClient{ /* don't supply port */ } - client.StartServer([]string{}, 0) - list := client.ListServers() + client, _ := createClient(true) + servers := client.ListServers() - if len(list.Servers) != 0 { - t.Fatalf("Expected 0 servers, got %d", len(list.Servers)) + if len(servers) != 3 { + t.Fatalf("Expected 3 server to be running, got %d", len(servers)) } - timeoutDuration = oldTimeoutDuration } func TestClient_StartServer(t *testing.T) { - port, _ := utils.GetFreePort() - _, svc := createDaemon(port, true) - waitForPortInTest(port, t) - defer waitForDaemonToShutdown(port, t) - client := &PactClient{Port: port} + client, svc := createClient(true) - mport, _ := utils.GetFreePort() - client.StartServer([]string{}, mport) + port, _ := utils.GetFreePort() + client.StartServer([]string{}, port) if svc.ServiceStartCount != 1 { t.Fatalf("Expected 1 server to have been started, got %d", svc.ServiceStartCount) } } -func TestClient_RPCErrors(t *testing.T) { - port, _ := utils.GetFreePort() - createDaemon(port, true) - - waitForPortInTest(port, t) - defer waitForDaemonToShutdown(port, t) - - // Mock out the RPC client - oldCommandStartServer := commandStartServer - oldCommandStopServer := 
commandStopServer - oldCommandVerifyProvider := commandVerifyProvider - oldCommandListServers := commandListServers - oldCommandStopDaemon := commandStopDaemon - - commandStartServer = "failcommand" - commandStopServer = "failcommand" - commandVerifyProvider = "failverifycommand" - commandListServers = "failcommand" - commandStopDaemon = "failcommand" +var oldTimeoutDuration = timeoutDuration - defer func() { - commandStartServer = oldCommandStartServer - commandStopServer = oldCommandStopServer - commandVerifyProvider = oldCommandVerifyProvider - commandListServers = oldCommandListServers - commandStopDaemon = oldCommandStopDaemon - }() +func TestClient_StartServerFail(t *testing.T) { + timeoutDuration = 50 * time.Millisecond - client := &PactClient{Port: port} - testCases := map[interface{}]func() interface{}{ - "rpc: service/method request ill-formed: failcommand": func() interface{} { - return client.StopDaemon().Error() - }, - &types.MockServer{}: func() interface{} { - return client.StopServer(&types.MockServer{}) - }, - &types.MockServer{}: func() interface{} { - return client.StartServer([]string{}, 0) - }, - &types.PactListResponse{}: func() interface{} { - return client.ListServers() - }, - "rpc: service/method request ill-formed: failverifycommand": func() interface{} { - _, err := client.VerifyProvider(types.VerifyRequest{}) - return err.Error() - }, + client, _ := createClient(false) + server := client.StartServer([]string{}, 0) + if server.Port != 0 { + t.Fatalf("Expected server to be empty %v", server) } + timeoutDuration = oldTimeoutDuration +} - for expected, testCase := range testCases { - res := testCase() - if !reflect.DeepEqual(expected, res) { - t.Fatalf("Expected '%v' but got '%v'", expected, res) - } +func TestClient_StopServer(t *testing.T) { + client, svc := createClient(true) + + client.StopServer(&types.MockServer{}) + if svc.ServiceStopCount != 1 { + t.Fatalf("Expected 1 server to have been stopped, got %d", svc.ServiceStartCount) } } -func TestClient_getPort(t *testing.T) { - testCases := map[string]int{ - "http://localhost:8000": 8000, - "http://localhost": 80, - "https://localhost": 443, - ":::::": -1, +func TestClient_StopServerFail(t *testing.T) { + timeoutDuration = 50 * time.Millisecond + client, _ := createClient(true) + res, err := client.StopServer(&types.MockServer{}) + should := &types.MockServer{} + if !reflect.DeepEqual(res, should) { + t.Fatalf("Expected nil object but got a difference: %v != %v", res, should) } - - for host, port := range testCases { - if getPort(host) != port { - t.Fatalf("Expected host '%s' to return port '%d' but got '%d'", host, port, getPort(host)) - } + if err != nil { + t.Fatalf("wanted error, got none") } + timeoutDuration = oldTimeoutDuration } func TestClient_VerifyProvider(t *testing.T) { - port, _ := utils.GetFreePort() - createDaemon(port, true) - waitForPortInTest(port, t) - defer waitForDaemonToShutdown(port, t) - client := &PactClient{Port: port} + client, _ := createClient(true) ms := setupMockServer(true, t) defer ms.Close() @@ -249,11 +91,7 @@ func TestClient_VerifyProvider(t *testing.T) { } func TestClient_VerifyProviderFailValidation(t *testing.T) { - port, _ := utils.GetFreePort() - createDaemon(port, true) - waitForPortInTest(port, t) - defer waitForDaemonToShutdown(port, t) - client := &PactClient{Port: port} + client, _ := createClient(true) req := types.VerifyRequest{} _, err := client.VerifyProvider(req) @@ -268,11 +106,7 @@ func TestClient_VerifyProviderFailValidation(t *testing.T) { } func 
TestClient_VerifyProviderFailExecution(t *testing.T) { - port, _ := utils.GetFreePort() - createDaemon(port, false) - waitForPortInTest(port, t) - defer waitForDaemonToShutdown(port, t) - client := &PactClient{Port: port} + client, _ := createClient(false) ms := setupMockServer(true, t) defer ms.Close() @@ -292,64 +126,93 @@ func TestClient_VerifyProviderFailExecution(t *testing.T) { } } -var oldTimeoutDuration = timeoutDuration - -func TestClient_StartServerFail(t *testing.T) { - timeoutDuration = 50 * time.Millisecond +func TestClient_getPort(t *testing.T) { + testCases := map[string]int{ + "http://localhost:8000": 8000, + "http://localhost": 80, + "https://localhost": 443, + ":::::": -1, + } - client := &PactClient{ /* don't supply port */ } - server := client.StartServer([]string{}, 0) - if server.Port != 0 { - t.Fatalf("Expected server to be empty %v", server) + for host, port := range testCases { + if getPort(host) != port { + t.Fatalf("Expected host '%s' to return port '%d' but got '%d'", host, port, getPort(host)) + } } - timeoutDuration = oldTimeoutDuration } -func TestClient_StopServer(t *testing.T) { - port, _ := utils.GetFreePort() - _, svc := createDaemon(port, true) - waitForPortInTest(port, t) - defer waitForDaemonToShutdown(port, t) - client := &PactClient{Port: port} - - client.StopServer(&types.MockServer{}) - if svc.ServiceStopCount != 1 { - t.Fatalf("Expected 1 server to have been stopped, got %d", svc.ServiceStartCount) +func TestClient_sanitiseRubyResponse(t *testing.T) { + var tests = map[string]string{ + "this is a sentence with a hash # so it should be in tact": "this is a sentence with a hash # so it should be in tact", + "this is a sentence with a hash and newline\n#so it should not be in tact": "this is a sentence with a hash and newline", + "this is a sentence with a ruby statement bundle exec rake pact:verify so it should not be in tact": "", + "this is a sentence with a ruby statement\nbundle exec rake pact:verify so it should not be in tact": "this is a sentence with a ruby statement", + "this is a sentence with multiple new lines \n\n\n\n\nit should not be in tact": "this is a sentence with multiple new lines \nit should not be in tact", + } + for k, v := range tests { + test := sanitiseRubyResponse(k) + if !strings.EqualFold(strings.TrimSpace(test), strings.TrimSpace(v)) { + log.Fatalf("Got `%s', Expected `%s`", strings.TrimSpace(test), strings.TrimSpace(v)) + } } } -func TestClient_StopServerFail(t *testing.T) { - timeoutDuration = 50 * time.Millisecond - client := &PactClient{ /* don't supply port */ } - res := client.StopServer(&types.MockServer{}) - should := &types.MockServer{} - if !reflect.DeepEqual(res, should) { - t.Fatalf("Expected nil object but got a difference: %v != %v", res, should) +// Use this to wait for a client to be running prior +// to running tests +func waitForPortInTest(port int, t *testing.T) { + timeout := time.After(1 * time.Second) + for { + select { + case <-timeout: + t.Fatalf("Expected server to start < 1s.") + case <-time.After(50 * time.Millisecond): + _, err := net.Dial("tcp", fmt.Sprintf(":%d", port)) + if err == nil { + return + } + } } - timeoutDuration = oldTimeoutDuration } -func TestClient_StopDaemon(t *testing.T) { - port, _ := utils.GetFreePort() - createDaemon(port, true) - waitForPortInTest(port, t) - client := &PactClient{Port: port} - - err := client.StopDaemon() - if err != nil { - t.Fatalf("Err: %v", err) +// This guy mocks out the underlying Service provider in the client, +// but executes actual client 
code. This means we don't spin up the real +// mock service but execute our code in isolation. +// +// Stubbing the exec.Cmd interface is hard, see fakeExec* functions for +// the magic. +func createClient(success bool) (*PactClient, *ServiceMock) { + execFunc := fakeExecSuccessCommand + if !success { + execFunc = fakeExecFailCommand + } + svc := &ServiceMock{ + Cmd: "test", + Args: []string{}, + ServiceStopResult: true, + ServiceStopError: nil, + ExecFunc: execFunc, + ServiceList: map[int]*exec.Cmd{ + 1: fakeExecCommand("", success, ""), + 2: fakeExecCommand("", success, ""), + 3: fakeExecCommand("", success, ""), + }, + ServiceStartCmd: nil, } - waitForDaemonToShutdown(port, t) -} -func TestClient_StopDaemonFail(t *testing.T) { - timeoutDuration = 50 * time.Millisecond - client := &PactClient{ /* don't supply port */ } - err := client.StopDaemon() - if err == nil { - t.Fatalf("Expected error but got none") + // Start all processes to get the Pids! + for _, s := range svc.ServiceList { + s.Start() } - timeoutDuration = oldTimeoutDuration + + // Cleanup all Processes when we finish + defer func() { + for _, s := range svc.ServiceList { + s.Process.Kill() + } + }() + + d := NewClient(svc, svc) + return d, svc } // Adapted from http://npf.io/2015/06/testing-exec-command/ @@ -361,6 +224,7 @@ var fakeExecFailCommand = func() *exec.Cmd { } func fakeExecCommand(command string, success bool, args ...string) *exec.Cmd { + log.Println("[YAEUT] Creating fake exec command with success", success) cs := []string{"-test.run=TestHelperProcess", "--", command} cs = append(cs, args...) cmd := exec.Command(os.Args[0], cs...) @@ -385,19 +249,3 @@ func TestHelperProcess(t *testing.T) { fmt.Fprintf(os.Stdout, `{"summary_line":"1 examples, 0 failures"}`) os.Exit(0) } - -func Test_sanitiseRubyResponse(t *testing.T) { - var tests = map[string]string{ - "this is a sentence with a hash # so it should be in tact": "this is a sentence with a hash # so it should be in tact", - "this is a sentence with a hash and newline\n#so it should not be in tact": "this is a sentence with a hash and newline", - "this is a sentence with a ruby statement bundle exec rake pact:verify so it should not be in tact": "", - "this is a sentence with a ruby statement\nbundle exec rake pact:verify so it should not be in tact": "this is a sentence with a ruby statement", - "this is a sentence with multiple new lines \n\n\n\n\nit should not be in tact": "this is a sentence with multiple new lines \nit should not be in tact", - } - for k, v := range tests { - test := sanitiseRubyResponse(k) - if !strings.EqualFold(strings.TrimSpace(test), strings.TrimSpace(v)) { - log.Fatalf("Got `%s', Expected `%s`", strings.TrimSpace(test), strings.TrimSpace(v)) - } - } -} diff --git a/dsl/pact.go b/dsl/pact.go index 93ff287fa..c0e739923 100644 --- a/dsl/pact.go +++ b/dsl/pact.go @@ -12,6 +12,7 @@ import ( "testing" "github.com/hashicorp/logutils" + "github.com/pact-foundation/pact-go/client" "github.com/pact-foundation/pact-go/types" "github.com/pact-foundation/pact-go/utils" ) @@ -21,9 +22,6 @@ type Pact struct { // Current server for the consumer. Server *types.MockServer - // Port the Pact Daemon is running on. - Port int - // Pact RPC Client. pactClient *PactClient @@ -62,12 +60,12 @@ type Pact struct { // Defaults to 2. 
SpecificationVersion int - // Host is the address of the Daemon, Mock and Verification Service runs on + // Host is the address that the Mock and Verification Services run on // Examples include 'localhost', '127.0.0.1', '[::1]' // Defaults to 'localhost' Host string - // Network is the network of the Daemon, Mock and Verification Service + // Network is the network of the Mock and Verification Service // Examples include 'tcp', 'tcp4', 'tcp6' // Defaults to 'tcp' Network string @@ -81,7 +79,7 @@ type Pact struct { // required things. Will automatically start a Mock Service if none running. func (p *Pact) AddInteraction() *Interaction { p.Setup(true) - log.Printf("[DEBUG] pact add interaction") + log.Println("[DEBUG] pact add interaction") i := &Interaction{} p.Interactions = append(p.Interactions, i) return i @@ -92,7 +90,7 @@ func (p *Pact) AddInteraction() *Interaction { // has been started. func (p *Pact) Setup(startMockServer bool) *Pact { p.setupLogging() - log.Printf("[DEBUG] pact setup") + log.Println("[DEBUG] pact setup") dir, _ := os.Getwd() if p.Network == "" { @@ -116,8 +114,7 @@ func (p *Pact) Setup(startMockServer bool) *Pact { } if p.pactClient == nil { - client := &PactClient{Port: p.Port, Network: p.Network, Address: p.Host} - p.pactClient = client + p.pactClient = NewClient(&client.MockService{}, &client.VerificationService{}) } // Need to predefine due to scoping @@ -131,9 +128,9 @@ func (p *Pact) Setup(startMockServer bool) *Pact { if perr != nil { log.Println("[ERROR] unable to find free port, mockserver will fail to start") } - log.Println("[DEBUG] starting mock service on port:", port) if p.Server == nil && startMockServer { + log.Println("[DEBUG] starting mock service on port:", port) args := []string{ "--pact-specification-version", fmt.Sprintf("%d", p.SpecificationVersion), @@ -166,15 +163,20 @@ func (p *Pact) setupLogging() { } log.SetOutput(p.logFilter) } - log.Printf("[DEBUG] pact setup logging") + log.Println("[DEBUG] pact setup logging") } // Teardown stops the Pact Mock Server. This usually is called on completion // of each test suite. func (p *Pact) Teardown() *Pact { - log.Printf("[DEBUG] teardown") + log.Println("[DEBUG] teardown") if p.Server != nil { - p.Server = p.pactClient.StopServer(p.Server) + server, err := p.pactClient.StopServer(p.Server) + + if err != nil { + log.Println("error:", err) + } + p.Server = server } return p } @@ -183,7 +185,7 @@ func (p *Pact) Teardown() *Pact { // Will cleanup interactions between tests within a suite. func (p *Pact) Verify(integrationTest func() error) error { p.Setup(true) - log.Printf("[DEBUG] pact verify") + log.Println("[DEBUG] pact verify") mockServer := &MockService{ BaseURL: fmt.Sprintf("http://%s:%d", p.Host, p.Server.Port), Consumer: p.Consumer, @@ -220,7 +222,7 @@ func (p *Pact) Verify(integrationTest func() error) error { // configured file.
func (p *Pact) WritePact() error { p.Setup(true) - log.Printf("[DEBUG] pact write Pact file") + log.Println("[DEBUG] pact write Pact file") mockServer := MockService{ BaseURL: fmt.Sprintf("http://%s:%d", p.Host, p.Server.Port), Consumer: p.Consumer, @@ -242,14 +244,14 @@ func (p *Pact) VerifyProviderRaw(request types.VerifyRequest) (types.ProviderVer // If we provide a Broker, we go to it to find consumers if request.BrokerURL != "" { - log.Printf("[DEBUG] pact provider verification - finding all consumers from broker: %s", request.BrokerURL) + log.Println("[DEBUG] pact provider verification - finding all consumers from broker: ", request.BrokerURL) err := findConsumers(p.Provider, &request) if err != nil { return types.ProviderVerifierResponse{}, err } } - log.Printf("[DEBUG] pact provider verification") + log.Println("[DEBUG] pact provider verification") return p.pactClient.VerifyProvider(request) } diff --git a/dsl/pact_test.go b/dsl/pact_test.go index 1e953f1ea..a069e0fb2 100644 --- a/dsl/pact_test.go +++ b/dsl/pact_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/pact-foundation/pact-go/types" - "github.com/pact-foundation/pact-go/utils" ) func TestPact_setupLogging(t *testing.T) { @@ -40,21 +39,6 @@ func TestPact_setupLogging(t *testing.T) { } } -// Capture output from a log write -func captureOutput(action func()) string { - rescueStderr := os.Stderr - r, w, _ := os.Pipe() - os.Stderr = w - - action() - - w.Close() - out, _ := ioutil.ReadAll(r) - os.Stderr = rescueStderr - - return strings.TrimSpace(string(out)) -} - func TestPact_Verify(t *testing.T) { ms := setupMockServer(true, t) defer ms.Close() @@ -88,7 +72,6 @@ func TestPact_Verify(t *testing.T) { t.Fatalf("Expected test function to be called but it was not") } } - func TestPact_VerifyMockServerFail(t *testing.T) { ms := setupMockServer(true, t) defer ms.Close() @@ -167,16 +150,12 @@ func TestPact_VerifyFail(t *testing.T) { } func TestPact_Setup(t *testing.T) { - t.Log("testing pact setup") - port, _ := utils.GetFreePort() - createDaemon(port, true) - - pact := &Pact{Port: port, LogLevel: "DEBUG"} + pact := &Pact{LogLevel: "DEBUG"} pact.Setup(true) if pact.Server == nil { t.Fatalf("Expected server to be created") } - pact2 := &Pact{Port: port, LogLevel: "DEBUG"} + pact2 := &Pact{LogLevel: "DEBUG"} pact2.Setup(false) if pact2.Server != nil { t.Fatalf("Expected server to be nil") @@ -187,10 +166,7 @@ func TestPact_Setup(t *testing.T) { } func TestPact_SetupWithMockServerPort(t *testing.T) { - port, _ := utils.GetFreePort() - createDaemon(port, true) - - pact := &Pact{Port: port, LogLevel: "DEBUG", AllowedMockServerPorts: "32768"} + pact := &Pact{LogLevel: "DEBUG", AllowedMockServerPorts: "32768"} pact.Setup(true) if pact.Server == nil { t.Fatalf("Expected server to be created") @@ -201,10 +177,7 @@ func TestPact_SetupWithMockServerPort(t *testing.T) { } func TestPact_SetupWithMockServerPortCSV(t *testing.T) { - port, _ := utils.GetFreePort() - createDaemon(port, true) - - pact := &Pact{Port: port, LogLevel: "DEBUG", AllowedMockServerPorts: "32768,32769"} + pact := &Pact{LogLevel: "DEBUG", AllowedMockServerPorts: "32768,32769"} pact.Setup(true) if pact.Server == nil { t.Fatalf("Expected server to be created") @@ -215,24 +188,19 @@ func TestPact_SetupWithMockServerPortCSV(t *testing.T) { } func TestPact_SetupWithMockServerPortRange(t *testing.T) { - port, _ := utils.GetFreePort() - createDaemon(port, true) - - pact := &Pact{Port: port, LogLevel: "DEBUG", AllowedMockServerPorts: "32768-32770"} + c, _ := createClient(true) + 
pact := &Pact{LogLevel: "DEBUG", AllowedMockServerPorts: "32768-32770", pactClient: c} pact.Setup(true) if pact.Server == nil { t.Fatalf("Expected server to be created") } if pact.Server.Port != 32768 { - t.Fatalf("Expected mock daemon to be started on specific port") + t.Fatal("Expected mock daemon to be started on specific port, got", 32768) } } func TestPact_Invalidrange(t *testing.T) { - port, _ := utils.GetFreePort() - createDaemon(port, true) - - pact := &Pact{Port: port, LogLevel: "DEBUG", AllowedMockServerPorts: "abc-32770"} + pact := &Pact{LogLevel: "DEBUG", AllowedMockServerPorts: "abc-32770"} pact.Setup(true) if pact.Server == nil { t.Fatalf("Expected server to be created") @@ -243,34 +211,19 @@ func TestPact_Invalidrange(t *testing.T) { } func TestPact_Teardown(t *testing.T) { - old := waitForPort - defer func() { waitForPort = old }() - waitForPort = func(int, string, string, string) error { - return nil - } - port, _ := utils.GetFreePort() - createDaemon(port, true) - waitForPortInTest(port, t) - - pact := &Pact{Port: port, LogLevel: "DEBUG"} + c, _ := createClient(true) + pact := &Pact{LogLevel: "DEBUG", pactClient: c} pact.Setup(true) pact.Teardown() - if pact.Server.Status != 0 { - t.Fatalf("Expected server exit status to be 0 but got %d", pact.Server.Status) + if pact.Server.Error != nil { + t.Fatal("got error:", pact.Server.Error) } } func TestPact_VerifyProvider(t *testing.T) { - old := waitForPort - defer func() { waitForPort = old }() - waitForPort = func(int, string, string, string) error { - return nil - } - port, _ := utils.GetFreePort() - createDaemon(port, true) - waitForPortInTest(port, t) + c, _ := createClient(true) - pact := &Pact{Port: port, LogLevel: "DEBUG", pactClient: &PactClient{Port: port}} + pact := &Pact{LogLevel: "DEBUG", pactClient: c} _, err := pact.VerifyProviderRaw(types.VerifyRequest{ ProviderBaseURL: "http://www.foo.com", PactURLs: []string{"foo.json", "bar.json"}, @@ -284,16 +237,9 @@ func TestPact_VerifyProvider(t *testing.T) { func TestPact_VerifyProviderBroker(t *testing.T) { s := setupMockBroker(false) defer s.Close() - old := waitForPort - defer func() { waitForPort = old }() - waitForPort = func(int, string, string, string) error { - return nil - } - port, _ := utils.GetFreePort() - createDaemon(port, true) - waitForPortInTest(port, t) + c, _ := createClient(true) - pact := &Pact{Port: port, LogLevel: "DEBUG", pactClient: &PactClient{Port: port}, Provider: "bobby"} + pact := &Pact{LogLevel: "DEBUG", pactClient: c, Provider: "bobby"} _, err := pact.VerifyProviderRaw(types.VerifyRequest{ ProviderBaseURL: "http://www.foo.com", BrokerURL: s.URL, @@ -309,16 +255,9 @@ func TestPact_VerifyProviderBroker(t *testing.T) { func TestPact_VerifyProviderBrokerNoConsumers(t *testing.T) { s := setupMockBroker(false) defer s.Close() - old := waitForPort - defer func() { waitForPort = old }() - waitForPort = func(int, string, string, string) error { - return nil - } - port, _ := utils.GetFreePort() - createDaemon(port, true) - waitForPortInTest(port, t) + c, _ := createClient(true) - pact := &Pact{Port: port, LogLevel: "DEBUG", pactClient: &PactClient{Port: port}, Provider: "providernotexist"} + pact := &Pact{LogLevel: "DEBUG", pactClient: c, Provider: "providernotexist"} _, err := pact.VerifyProviderRaw(types.VerifyRequest{ ProviderBaseURL: "http://www.foo.com", BrokerURL: s.URL, @@ -330,16 +269,8 @@ func TestPact_VerifyProviderBrokerNoConsumers(t *testing.T) { } func TestPact_VerifyProviderFail(t *testing.T) { - old := waitForPort - defer func() { 
waitForPort = old }() - waitForPort = func(int, string, string, string) error { - return nil - } - port, _ := utils.GetFreePort() - createDaemon(port, false) - waitForPortInTest(port, t) - - pact := &Pact{Port: port, LogLevel: "DEBUG", pactClient: &PactClient{Port: port}} + c, _ := createClient(false) + pact := &Pact{LogLevel: "DEBUG", pactClient: c} _, err := pact.VerifyProviderRaw(types.VerifyRequest{ ProviderBaseURL: "http://www.foo.com", PactURLs: []string{"foo.json", "bar.json"}, @@ -371,3 +302,18 @@ func TestPact_AddInteraction(t *testing.T) { t.Fatalf("Expected 2 interactions to be added to Pact but got %d", len(pact.Interactions)) } } + +// Capture output from a log write +func captureOutput(action func()) string { + rescueStderr := os.Stderr + r, w, _ := os.Pipe() + os.Stderr = w + + action() + + w.Close() + out, _ := ioutil.ReadAll(r) + os.Stderr = rescueStderr + + return strings.TrimSpace(string(out)) +} diff --git a/client/service_mock.go b/dsl/service_mock.go similarity index 78% rename from client/service_mock.go rename to dsl/service_mock.go index 4b0c08444..978264c8d 100644 --- a/client/service_mock.go +++ b/dsl/service_mock.go @@ -1,9 +1,9 @@ -package client +package dsl import ( - "io" - "log" "os/exec" + + "github.com/pact-foundation/pact-go/client" ) // ServiceMock is the mock implementation of the Service interface. @@ -40,17 +40,6 @@ func (s *ServiceMock) List() map[int]*exec.Cmd { return s.ServiceList } -// Run runs a service synchronously and log its output to the given Pipe. -func (s *ServiceMock) Run(w io.Writer) (*exec.Cmd, error) { - log.Println("[DEBUG] starting service") - cmd := s.ExecFunc() - cmd.Stdout = w - cmd.Stderr = w - err := cmd.Run() - - return cmd, err -} - // Start a Service and log its output. func (s *ServiceMock) Start() *exec.Cmd { @@ -65,11 +54,12 @@ func (s *ServiceMock) Start() *exec.Cmd { return cmd } +// Command implements to Command operation func (s *ServiceMock) Command() *exec.Cmd { return s.ExecFunc() } // NewService creates a new MockService with default settings. -func (s *ServiceMock) NewService(args []string) Service { +func (s *ServiceMock) NewService(args []string) client.Service { return s } diff --git a/examples/consumer/goconsumer/user_service_test.go b/examples/consumer/goconsumer/user_service_test.go index 5706ce7a6..e0f39686b 100644 --- a/examples/consumer/goconsumer/user_service_test.go +++ b/examples/consumer/goconsumer/user_service_test.go @@ -24,10 +24,7 @@ var pact dsl.Pact var form url.Values var rr http.ResponseWriter var req *http.Request - var name = "Jean-Marie de La Beaujardière😀😍" - -// var name = "billy" var like = dsl.Like var eachLike = dsl.EachLike var term = dsl.Term @@ -80,7 +77,7 @@ func setup() { // Login form values form = url.Values{} - form.Add("username") + form.Add("username", name) form.Add("password", "issilly") // Create a request to pass to our handler. @@ -94,9 +91,7 @@ func setup() { // Create Pact connecting to local Daemon func createPact() dsl.Pact { - pactDaemonPort := 6666 return dsl.Pact{ - Port: pactDaemonPort, Consumer: "billy", Provider: "bobby", LogDir: logDir, @@ -135,13 +130,15 @@ func TestPactConsumerLoginHandler_UserExists(t *testing.T) { Given("User billy exists"). UponReceiving("A request to login with user 'billy'"). WithRequest(dsl.Request{ - Method: "POST", - Path: "/users/login", - Body: loginRequest, + Method: "POST", + Path: "/users/login", + Body: loginRequest, + Headers: commonHeaders, }). 
WillRespondWith(dsl.Response{ - Status: 200, - Body: body, + Status: 200, + Body: body, + Headers: commonHeaders, }) err := pact.Verify(testBillyExists) diff --git a/examples/gin/provider/user_service_test.go b/examples/gin/provider/user_service_test.go index c4496a7a8..3d956dcf7 100644 --- a/examples/gin/provider/user_service_test.go +++ b/examples/gin/provider/user_service_test.go @@ -129,7 +129,6 @@ var billyUnauthorized = &examples.UserRepository{ func createPact() dsl.Pact { // Create Pact connecting to local Daemon return dsl.Pact{ - Port: 6666, Consumer: "billy", Provider: "bobby", LogDir: logDir, diff --git a/examples/go-kit/provider/user_service_test.go b/examples/go-kit/provider/user_service_test.go index ceff9be62..cb2a282a2 100644 --- a/examples/go-kit/provider/user_service_test.go +++ b/examples/go-kit/provider/user_service_test.go @@ -109,10 +109,7 @@ func TestPact_GoKitProvider(t *testing.T) { // Setup the Pact client. func createPact() dsl.Pact { - // Create Pact connecting to local Daemon - pactDaemonPort := 6666 return dsl.Pact{ - Port: pactDaemonPort, Consumer: "billy", Provider: "bobby", LogDir: logDir, diff --git a/examples/mux/provider/user_service_test.go b/examples/mux/provider/user_service_test.go index 4bdad88ee..18769bda3 100644 --- a/examples/mux/provider/user_service_test.go +++ b/examples/mux/provider/user_service_test.go @@ -155,7 +155,6 @@ var billyUnauthorized = &examples.UserRepository{ func createPact() dsl.Pact { // Create Pact connecting to local Daemon return dsl.Pact{ - Port: 6666, Consumer: "billy", Provider: "bobby", LogDir: logDir, diff --git a/install/profile.tmp b/install/profile.tmp new file mode 100644 index 000000000..c62dd8b94 --- /dev/null +++ b/install/profile.tmp @@ -0,0 +1 @@ +mode: count diff --git a/scripts/build_standalone_packages.sh b/scripts/build_standalone_packages.sh index 6785a5b8b..4fbec807e 100755 --- a/scripts/build_standalone_packages.sh +++ b/scripts/build_standalone_packages.sh @@ -5,7 +5,7 @@ set -e mkdir -p build cd build -PACT_STANDALONE_VERSION=1.22.0 +PACT_STANDALONE_VERSION=1.30.0 urls=(https://github.com/pact-foundation/pact-ruby-standalone/releases/download/v$PACT_STANDALONE_VERSION/pact-$PACT_STANDALONE_VERSION-linux-x86.tar.gz https://github.com/pact-foundation/pact-ruby-standalone/releases/download/v$PACT_STANDALONE_VERSION/pact-$PACT_STANDALONE_VERSION-linux-x86_64.tar.gz https://github.com/pact-foundation/pact-ruby-standalone/releases/download/v$PACT_STANDALONE_VERSION/pact-$PACT_STANDALONE_VERSION-osx.tar.gz https://github.com/pact-foundation/pact-ruby-standalone/releases/download/v$PACT_STANDALONE_VERSION/pact-$PACT_STANDALONE_VERSION-win32.zip) # Provider Verifier @@ -16,37 +16,5 @@ if [ ! 
-f pact-$PACT_STANDALONE_VERSION-linux-x86.tar.gz ]; then wget $url done else - echo "pact provider verifier already generated, run 'make clean' to generate a new package" -fi - -osarchs=(osx win32 linux-x86 linux-x86_64) - -echo "--> Packaging distributions" -mkdir -p ../dist -for os in "${osarchs[@]}" -do - echo "Building ${os}" - osarch="" - if [ "${os}" = "win32" ]; then - osarch="windows_386" - cp pact-go_$osarch.exe pact-go - zip -u pact-$PACT_STANDALONE_VERSION-$os.zip pact-go - cp pact-$PACT_STANDALONE_VERSION-$os.zip ../dist/pact-go_$osarch.zip - else - if [ "${os}" = "osx" ]; then - osarch="darwin_amd64" - cp pact-go_$osarch pact-go - elif [ "${os}" = "linux-x86" ]; then - osarch="linux_386" - cp pact-go_$osarch pact-go - elif [ "${os}" = "linux-x86_64" ]; then - osarch="linux_amd64" - cp pact-go_$osarch pact-go - fi - - gunzip pact-$PACT_STANDALONE_VERSION-$os.tar.gz - tar -rf pact-$PACT_STANDALONE_VERSION-$os.tar pact-go - gzip pact-$PACT_STANDALONE_VERSION-$os.tar - cp pact-$PACT_STANDALONE_VERSION-$os.tar.gz ../dist/pact-go_$osarch.tar.gz - fi -done + echo "Pact CLI tools already downloaded, run 'make clean' to refresh" +fi \ No newline at end of file diff --git a/scripts/dev.sh b/scripts/dev.sh deleted file mode 100755 index d20f3d1bb..000000000 --- a/scripts/dev.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -e - -set -e - -./scripts/build.sh -./scripts/package.sh - -# Setup dev -echo "==> Creating OS distributions..." -tar -xzf dist/darwin-amd64.tar.gz -C $(pwd) diff --git a/scripts/pact.sh b/scripts/pact.sh index b807c98bb..d02b09614 100755 --- a/scripts/pact.sh +++ b/scripts/pact.sh @@ -1,61 +1,29 @@ #!/bin/bash +e -############################################################ -## Start and stop the Pact Mock Server daemon ## -############################################################ - LIBDIR=$(dirname "$0") . "${LIBDIR}/lib" +curDir=$(pwd) trap shutdown INT -CUR_DIR=$(pwd) exitCode=0 function shutdown() { - step "Shutting down stub server" - log "Finding Pact daemon PID" - PID=$(ps -ef | grep "pact-go daemon"| grep -v grep | awk -F" " '{print $2}' | head -n 1) - if [ "${PID}" != "" ]; then - log "Killing ${PID}" - kill $PID - fi - cd $CUR_DIR - if [ "${exitCode}" != "0" ]; then log "Reviewing log output: " cat logs/* fi } -if [ ! -f "dist/pact-go" ]; then - cd dist - platform=$(detect_os) - archive="pact-go_${platform}_amd64.tar.gz" - step "Installing Pact Go for ${platform}" - - if [ ! -f "${archive}" ]; then - log "Cannot find distribution package ${archive}, please run 'make package' first" - exit 1 - fi - - log "Expanding archive" - if [[ $platform == 'linux' ]]; then - tar -xf $archive - elif [[ $platform == 'darwin' ]]; then - tar -xf $archive - else - log "Unsupported platform ${platform}" - exit 1 - fi - +if [ ! -d "build/pact" ]; then + step "Installing CLI tools locally" + mkdir -p build/pact + cd build + curl -fsSL https://raw.githubusercontent.com/pact-foundation/pact-ruby-standalone/master/install.sh | bash log "Done!"
fi -cd $CUR_DIR - -step "Starting Daemon" -mkdir -p ./logs -./dist/pact-go daemon -v -l DEBUG > logs/daemon.log 2>&1 & +cd ${curDir} +export PATH="./bin/pact:${PATH}" export PACT_INTEGRATED_TESTS=1 export PACT_BROKER_HOST="https://test.pact.dius.com.au" export PACT_BROKER_USERNAME="dXfltyFMgNOFZAxr8io9wJ37iUpY42M" diff --git a/types/mock_server.go b/types/mock_server.go index 70d6704e7..3b746c7b8 100644 --- a/types/mock_server.go +++ b/types/mock_server.go @@ -2,8 +2,8 @@ package types // MockServer contains the RPC client interface to a Mock Server type MockServer struct { - Pid int - Port int - Status int - Args []string + Pid int + Port int + Error error + Args []string } diff --git a/vendor/github.com/gin-contrib/sse/.travis.yml b/vendor/github.com/gin-contrib/sse/.travis.yml new file mode 100644 index 000000000..a556ac09e --- /dev/null +++ b/vendor/github.com/gin-contrib/sse/.travis.yml @@ -0,0 +1,15 @@ +language: go +sudo: false +go: + - 1.6.4 + - 1.7.4 + - tip + +git: + depth: 3 + +script: + - go test -v -covermode=count -coverprofile=coverage.out + +after_success: + - bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/vendor/github.com/gin-contrib/sse/LICENSE b/vendor/github.com/gin-contrib/sse/LICENSE new file mode 100644 index 000000000..1ff7f3706 --- /dev/null +++ b/vendor/github.com/gin-contrib/sse/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Manuel Martínez-Almeida + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/gin-contrib/sse/README.md b/vendor/github.com/gin-contrib/sse/README.md new file mode 100644 index 000000000..c9c49cf94 --- /dev/null +++ b/vendor/github.com/gin-contrib/sse/README.md @@ -0,0 +1,58 @@ +# Server-Sent Events + +[![GoDoc](https://godoc.org/github.com/gin-contrib/sse?status.svg)](https://godoc.org/github.com/gin-contrib/sse) +[![Build Status](https://travis-ci.org/gin-contrib/sse.svg)](https://travis-ci.org/gin-contrib/sse) +[![codecov](https://codecov.io/gh/gin-contrib/sse/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-contrib/sse) +[![Go Report Card](https://goreportcard.com/badge/github.com/gin-contrib/sse)](https://goreportcard.com/report/github.com/gin-contrib/sse) + +Server-sent events (SSE) is a technology where a browser receives automatic updates from a server via HTTP connection. The Server-Sent Events EventSource API is [standardized as part of HTML5[1] by the W3C](http://www.w3.org/TR/2009/WD-eventsource-20091029/). 
+ +- [Read this great SSE introduction by the HTML5Rocks guys](http://www.html5rocks.com/en/tutorials/eventsource/basics/) +- [Browser support](http://caniuse.com/#feat=eventsource) + +## Sample code + +```go +import "github.com/gin-contrib/sse" + +func httpHandler(w http.ResponseWriter, req *http.Request) { + // data can be a primitive like a string, an integer or a float + sse.Encode(w, sse.Event{ + Event: "message", + Data: "some data\nmore data", + }) + + // also a complex type, like a map, a struct or a slice + sse.Encode(w, sse.Event{ + Id: "124", + Event: "message", + Data: map[string]interface{}{ + "user": "manu", + "date": time.Now().Unix(), + "content": "hi!", + }, + }) +} +``` +``` +event: message +data: some data\\nmore data + +id: 124 +event: message +data: {"content":"hi!","date":1431540810,"user":"manu"} + +``` + +## Content-Type + +```go +fmt.Println(sse.ContentType) +``` +``` +text/event-stream +``` + +## Decoding support + +There is a client-side implementation of SSE coming soon. diff --git a/vendor/github.com/gin-contrib/sse/sse-decoder.go b/vendor/github.com/gin-contrib/sse/sse-decoder.go new file mode 100644 index 000000000..fd49b9c37 --- /dev/null +++ b/vendor/github.com/gin-contrib/sse/sse-decoder.go @@ -0,0 +1,116 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package sse + +import ( + "bytes" + "io" + "io/ioutil" +) + +type decoder struct { + events []Event +} + +func Decode(r io.Reader) ([]Event, error) { + var dec decoder + return dec.decode(r) +} + +func (d *decoder) dispatchEvent(event Event, data string) { + dataLength := len(data) + if dataLength > 0 { + //If the data buffer's last character is a U+000A LINE FEED (LF) character, then remove the last character from the data buffer. + data = data[:dataLength-1] + dataLength-- + } + if dataLength == 0 && event.Event == "" { + return + } + if event.Event == "" { + event.Event = "message" + } + event.Data = data + d.events = append(d.events, event) +} + +func (d *decoder) decode(r io.Reader) ([]Event, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + var currentEvent Event + var dataBuffer *bytes.Buffer = new(bytes.Buffer) + // TODO (and unit tests) + // Lines must be separated by either a U+000D CARRIAGE RETURN U+000A LINE FEED (CRLF) character pair, + // a single U+000A LINE FEED (LF) character, + // or a single U+000D CARRIAGE RETURN (CR) character. + lines := bytes.Split(buf, []byte{'\n'}) + for _, line := range lines { + if len(line) == 0 { + // If the line is empty (a blank line). Dispatch the event. + d.dispatchEvent(currentEvent, dataBuffer.String()) + + // reset current event and data buffer + currentEvent = Event{} + dataBuffer.Reset() + continue + } + if line[0] == byte(':') { + // If the line starts with a U+003A COLON character (:), ignore the line. + continue + } + + var field, value []byte + colonIndex := bytes.IndexRune(line, ':') + if colonIndex != -1 { + // If the line contains a U+003A COLON character character (:) + // Collect the characters on the line before the first U+003A COLON character (:), + // and let field be that string. + field = line[:colonIndex] + // Collect the characters on the line after the first U+003A COLON character (:), + // and let value be that string. + value = line[colonIndex+1:] + // If value starts with a single U+0020 SPACE character, remove it from value. 
+ if len(value) > 0 && value[0] == ' ' { + value = value[1:] + } + } else { + // Otherwise, the string is not empty but does not contain a U+003A COLON character character (:) + // Use the whole line as the field name, and the empty string as the field value. + field = line + value = []byte{} + } + // The steps to process the field given a field name and a field value depend on the field name, + // as given in the following list. Field names must be compared literally, + // with no case folding performed. + switch string(field) { + case "event": + // Set the event name buffer to field value. + currentEvent.Event = string(value) + case "id": + // Set the event stream's last event ID to the field value. + currentEvent.Id = string(value) + case "retry": + // If the field value consists of only characters in the range U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9), + // then interpret the field value as an integer in base ten, and set the event stream's reconnection time to that integer. + // Otherwise, ignore the field. + currentEvent.Id = string(value) + case "data": + // Append the field value to the data buffer, + dataBuffer.Write(value) + // then append a single U+000A LINE FEED (LF) character to the data buffer. + dataBuffer.WriteString("\n") + default: + //Otherwise. The field is ignored. + continue + } + } + // Once the end of the file is reached, the user agent must dispatch the event one final time. + d.dispatchEvent(currentEvent, dataBuffer.String()) + + return d.events, nil +} diff --git a/vendor/github.com/gin-contrib/sse/sse-decoder_test.go b/vendor/github.com/gin-contrib/sse/sse-decoder_test.go new file mode 100644 index 000000000..068107b64 --- /dev/null +++ b/vendor/github.com/gin-contrib/sse/sse-decoder_test.go @@ -0,0 +1,116 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package sse + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDecodeSingle1(t *testing.T) { + events, err := Decode(bytes.NewBufferString( + `data: this is a text +event: message +fake: +id: 123456789010 +: we can append data +: and multiple comments should not break it +data: a very nice one`)) + + assert.NoError(t, err) + assert.Len(t, events, 1) + assert.Equal(t, events[0].Event, "message") + assert.Equal(t, events[0].Id, "123456789010") +} + +func TestDecodeSingle2(t *testing.T) { + events, err := Decode(bytes.NewBufferString( + `: starting with a comment +fake: + +data:this is a \ntext +event:a message\n\n +fake +:and multiple comments\n should not break it\n\n +id:1234567890\n10 +:we can append data +data:a very nice one\n! 
+ + +`)) + assert.NoError(t, err) + assert.Len(t, events, 1) + assert.Equal(t, events[0].Event, "a message\\n\\n") + assert.Equal(t, events[0].Id, "1234567890\\n10") +} + +func TestDecodeSingle3(t *testing.T) { + events, err := Decode(bytes.NewBufferString( + ` +id:123456ABCabc789010 +event: message123 +: we can append data +data:this is a text +data: a very nice one +data: +data +: ending with a comment`)) + + assert.NoError(t, err) + assert.Len(t, events, 1) + assert.Equal(t, events[0].Event, "message123") + assert.Equal(t, events[0].Id, "123456ABCabc789010") +} + +func TestDecodeMulti1(t *testing.T) { + events, err := Decode(bytes.NewBufferString( + ` +id: +event: weird event +data:this is a text +:data: this should NOT APER +data: second line + +: a comment +event: message +id:123 +data:this is a text +:data: this should NOT APER +data: second line + + +: a comment +event: message +id:123 +data:this is a text +data: second line + +:hola + +data + +event: + +id`)) + assert.NoError(t, err) + assert.Len(t, events, 3) + assert.Equal(t, events[0].Event, "weird event") + assert.Equal(t, events[0].Id, "") +} + +func TestDecodeW3C(t *testing.T) { + events, err := Decode(bytes.NewBufferString( + `data + +data +data + +data: +`)) + assert.NoError(t, err) + assert.Len(t, events, 1) +} diff --git a/vendor/github.com/gin-contrib/sse/sse-encoder.go b/vendor/github.com/gin-contrib/sse/sse-encoder.go new file mode 100644 index 000000000..f9c808750 --- /dev/null +++ b/vendor/github.com/gin-contrib/sse/sse-encoder.go @@ -0,0 +1,110 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package sse + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + "strings" +) + +// Server-Sent Events +// W3C Working Draft 29 October 2009 +// http://www.w3.org/TR/2009/WD-eventsource-20091029/ + +const ContentType = "text/event-stream" + +var contentType = []string{ContentType} +var noCache = []string{"no-cache"} + +var fieldReplacer = strings.NewReplacer( + "\n", "\\n", + "\r", "\\r") + +var dataReplacer = strings.NewReplacer( + "\n", "\ndata:", + "\r", "\\r") + +type Event struct { + Event string + Id string + Retry uint + Data interface{} +} + +func Encode(writer io.Writer, event Event) error { + w := checkWriter(writer) + writeId(w, event.Id) + writeEvent(w, event.Event) + writeRetry(w, event.Retry) + return writeData(w, event.Data) +} + +func writeId(w stringWriter, id string) { + if len(id) > 0 { + w.WriteString("id:") + fieldReplacer.WriteString(w, id) + w.WriteString("\n") + } +} + +func writeEvent(w stringWriter, event string) { + if len(event) > 0 { + w.WriteString("event:") + fieldReplacer.WriteString(w, event) + w.WriteString("\n") + } +} + +func writeRetry(w stringWriter, retry uint) { + if retry > 0 { + w.WriteString("retry:") + w.WriteString(strconv.FormatUint(uint64(retry), 10)) + w.WriteString("\n") + } +} + +func writeData(w stringWriter, data interface{}) error { + w.WriteString("data:") + switch kindOfData(data) { + case reflect.Struct, reflect.Slice, reflect.Map: + err := json.NewEncoder(w).Encode(data) + if err != nil { + return err + } + w.WriteString("\n") + default: + dataReplacer.WriteString(w, fmt.Sprint(data)) + w.WriteString("\n\n") + } + return nil +} + +func (r Event) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + return Encode(w, r) +} + +func (r Event) WriteContentType(w http.ResponseWriter) { + header := w.Header() + 
header["Content-Type"] = contentType + + if _, exist := header["Cache-Control"]; !exist { + header["Cache-Control"] = noCache + } +} + +func kindOfData(data interface{}) reflect.Kind { + value := reflect.ValueOf(data) + valueType := value.Kind() + if valueType == reflect.Ptr { + valueType = value.Elem().Kind() + } + return valueType +} diff --git a/vendor/github.com/gin-contrib/sse/sse_test.go b/vendor/github.com/gin-contrib/sse/sse_test.go new file mode 100644 index 000000000..61b685b0e --- /dev/null +++ b/vendor/github.com/gin-contrib/sse/sse_test.go @@ -0,0 +1,255 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package sse + +import ( + "bytes" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEncodeOnlyData(t *testing.T) { + w := new(bytes.Buffer) + event := Event{ + Data: "junk\n\njk\nid:fake", + } + err := Encode(w, event) + assert.NoError(t, err) + assert.Equal(t, w.String(), + `data:junk +data: +data:jk +data:id:fake + +`) + + decoded, _ := Decode(w) + assert.Equal(t, decoded, []Event{event}) +} + +func TestEncodeWithEvent(t *testing.T) { + w := new(bytes.Buffer) + event := Event{ + Event: "t\n:<>\r\test", + Data: "junk\n\njk\nid:fake", + } + err := Encode(w, event) + assert.NoError(t, err) + assert.Equal(t, w.String(), + `event:t\n:<>\r est +data:junk +data: +data:jk +data:id:fake + +`) + + decoded, _ := Decode(w) + assert.Equal(t, decoded, []Event{event}) +} + +func TestEncodeWithId(t *testing.T) { + w := new(bytes.Buffer) + err := Encode(w, Event{ + Id: "t\n:<>\r\test", + Data: "junk\n\njk\nid:fa\rke", + }) + assert.NoError(t, err) + assert.Equal(t, w.String(), + `id:t\n:<>\r est +data:junk +data: +data:jk +data:id:fa\rke + +`) +} + +func TestEncodeWithRetry(t *testing.T) { + w := new(bytes.Buffer) + err := Encode(w, Event{ + Retry: 11, + Data: "junk\n\njk\nid:fake\n", + }) + assert.NoError(t, err) + assert.Equal(t, w.String(), + `retry:11 +data:junk +data: +data:jk +data:id:fake +data: + +`) +} + +func TestEncodeWithEverything(t *testing.T) { + w := new(bytes.Buffer) + err := Encode(w, Event{ + Event: "abc", + Id: "12345", + Retry: 10, + Data: "some data", + }) + assert.NoError(t, err) + assert.Equal(t, w.String(), "id:12345\nevent:abc\nretry:10\ndata:some data\n\n") +} + +func TestEncodeMap(t *testing.T) { + w := new(bytes.Buffer) + err := Encode(w, Event{ + Event: "a map", + Data: map[string]interface{}{ + "foo": "b\n\rar", + "bar": "id: 2", + }, + }) + assert.NoError(t, err) + assert.Equal(t, w.String(), "event:a map\ndata:{\"bar\":\"id: 2\",\"foo\":\"b\\n\\rar\"}\n\n") +} + +func TestEncodeSlice(t *testing.T) { + w := new(bytes.Buffer) + err := Encode(w, Event{ + Event: "a slice", + Data: []interface{}{1, "text", map[string]interface{}{"foo": "bar"}}, + }) + assert.NoError(t, err) + assert.Equal(t, w.String(), "event:a slice\ndata:[1,\"text\",{\"foo\":\"bar\"}]\n\n") +} + +func TestEncodeStruct(t *testing.T) { + myStruct := struct { + A int + B string `json:"value"` + }{1, "number"} + + w := new(bytes.Buffer) + err := Encode(w, Event{ + Event: "a struct", + Data: myStruct, + }) + assert.NoError(t, err) + assert.Equal(t, w.String(), "event:a struct\ndata:{\"A\":1,\"value\":\"number\"}\n\n") + + w.Reset() + err = Encode(w, Event{ + Event: "a struct", + Data: &myStruct, + }) + assert.NoError(t, err) + assert.Equal(t, w.String(), "event:a struct\ndata:{\"A\":1,\"value\":\"number\"}\n\n") +} + +func 
TestEncodeInteger(t *testing.T) { + w := new(bytes.Buffer) + err := Encode(w, Event{ + Event: "an integer", + Data: 1, + }) + assert.NoError(t, err) + assert.Equal(t, w.String(), "event:an integer\ndata:1\n\n") +} + +func TestEncodeFloat(t *testing.T) { + w := new(bytes.Buffer) + err := Encode(w, Event{ + Event: "Float", + Data: 1.5, + }) + assert.NoError(t, err) + assert.Equal(t, w.String(), "event:Float\ndata:1.5\n\n") +} + +func TestEncodeStream(t *testing.T) { + w := new(bytes.Buffer) + + Encode(w, Event{ + Event: "float", + Data: 1.5, + }) + + Encode(w, Event{ + Id: "123", + Data: map[string]interface{}{"foo": "bar", "bar": "foo"}, + }) + + Encode(w, Event{ + Id: "124", + Event: "chat", + Data: "hi! dude", + }) + assert.Equal(t, w.String(), "event:float\ndata:1.5\n\nid:123\ndata:{\"bar\":\"foo\",\"foo\":\"bar\"}\n\nid:124\nevent:chat\ndata:hi! dude\n\n") +} + +func TestRenderSSE(t *testing.T) { + w := httptest.NewRecorder() + + err := (Event{ + Event: "msg", + Data: "hi! how are you?", + }).Render(w) + + assert.NoError(t, err) + assert.Equal(t, w.Body.String(), "event:msg\ndata:hi! how are you?\n\n") + assert.Equal(t, w.Header().Get("Content-Type"), "text/event-stream") + assert.Equal(t, w.Header().Get("Cache-Control"), "no-cache") +} + +func BenchmarkResponseWriter(b *testing.B) { + w := httptest.NewRecorder() + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + (Event{ + Event: "new_message", + Data: "hi! how are you? I am fine. this is a long stupid message!!!", + }).Render(w) + } +} + +func BenchmarkFullSSE(b *testing.B) { + buf := new(bytes.Buffer) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + Encode(buf, Event{ + Event: "new_message", + Id: "13435", + Retry: 10, + Data: "hi! how are you? I am fine. this is a long stupid message!!!", + }) + buf.Reset() + } +} + +func BenchmarkNoRetrySSE(b *testing.B) { + buf := new(bytes.Buffer) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + Encode(buf, Event{ + Event: "new_message", + Id: "13435", + Data: "hi! how are you? I am fine. this is a long stupid message!!!", + }) + buf.Reset() + } +} + +func BenchmarkSimpleSSE(b *testing.B) { + buf := new(bytes.Buffer) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + Encode(buf, Event{ + Event: "new_message", + Data: "hi! how are you? I am fine. 
this is a long stupid message!!!", + }) + buf.Reset() + } +} diff --git a/vendor/github.com/gin-contrib/sse/writer.go b/vendor/github.com/gin-contrib/sse/writer.go new file mode 100644 index 000000000..6f9806c55 --- /dev/null +++ b/vendor/github.com/gin-contrib/sse/writer.go @@ -0,0 +1,24 @@ +package sse + +import "io" + +type stringWriter interface { + io.Writer + WriteString(string) (int, error) +} + +type stringWrapper struct { + io.Writer +} + +func (w stringWrapper) WriteString(str string) (int, error) { + return w.Writer.Write([]byte(str)) +} + +func checkWriter(writer io.Writer) stringWriter { + if w, ok := writer.(stringWriter); ok { + return w + } else { + return stringWrapper{writer} + } +} diff --git a/vendor/github.com/gin-gonic/gin/.travis.yml b/vendor/github.com/gin-gonic/gin/.travis.yml new file mode 100644 index 000000000..63b5d1131 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/.travis.yml @@ -0,0 +1,35 @@ +language: go +sudo: false +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - master + +git: + depth: 3 + +install: + - make install + +go_import_path: github.com/gin-gonic/gin + +script: + - make vet + - make fmt-check + - make embedmd + - make misspell-check + - make test + +after_success: + - bash <(curl -s https://codecov.io/bash) + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/7f95bf605c4d356372f4 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: false # default: false diff --git a/vendor/github.com/gin-gonic/gin/AUTHORS.md b/vendor/github.com/gin-gonic/gin/AUTHORS.md new file mode 100644 index 000000000..7ab7213d9 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/AUTHORS.md @@ -0,0 +1,227 @@ +List of all the awesome people working to make Gin the best Web Framework in Go. + +## gin 0.x series authors + +**Maintainer:** Manu Martinez-Almeida (@manucorporat), Javier Provecho (@javierprovecho) + +People and companies, who have contributed, in alphabetical order. + +**@858806258 (杰哥)** +- Fix typo in example + + +**@achedeuzot (Klemen Sever)** +- Fix newline debug printing + + +**@adammck (Adam Mckaig)** +- Add MIT license + + +**@AlexanderChen1989 (Alexander)** +- Typos in README + + +**@alexanderdidenko (Aleksandr Didenko)** +- Add support multipart/form-data + + +**@alexandernyquist (Alexander Nyquist)** +- Using template.Must to fix multiple return issue +- ★ Added support for OPTIONS verb +- ★ Setting response headers before calling WriteHeader +- Improved documentation for model binding +- ★ Added Content.Redirect() +- ★ Added tons of Unit tests + + +**@austinheap (Austin Heap)** +- Added travis CI integration + + +**@andredublin (Andre Dublin)** +- Fix typo in comment + + +**@bredov (Ludwig Valda Vasquez)** +- Fix html templating in debug mode + + +**@bluele (Jun Kimura)** +- Fixes code examples in README + + +**@chad-russell** +- ★ Support for serializing gin.H into XML + + +**@dickeyxxx (Jeff Dickey)** +- Typos in README +- Add example about serving static files + + +**@donileo (Adonis)** +- Add NoMethod handler + + +**@dutchcoders (DutchCoders)** +- ★ Fix security bug that allows client to spoof ip +- Fix typo. 
r.HTMLTemplates -> SetHTMLTemplate + + +**@el3ctro- (Joshua Loper)** +- Fix typo in example + + +**@ethankan (Ethan Kan)** +- Unsigned integers in binding + + +**(Evgeny Persienko)** +- Validate sub structures + + +**@frankbille (Frank Bille)** +- Add support for HTTP Realm Auth + + +**@fmd (Fareed Dudhia)** +- Fix typo. SetHTTPTemplate -> SetHTMLTemplate + + +**@ironiridis (Christopher Harrington)** +- Remove old reference + + +**@jammie-stackhouse (Jamie Stackhouse)** +- Add more shortcuts for router methods + + +**@jasonrhansen** +- Fix spelling and grammar errors in documentation + + +**@JasonSoft (Jason Lee)** +- Fix typo in comment + + +**@joiggama (Ignacio Galindo)** +- Add utf-8 charset header on renders + + +**@julienschmidt (Julien Schmidt)** +- gofmt the code examples + + +**@kelcecil (Kel Cecil)** +- Fix readme typo + + +**@kyledinh (Kyle Dinh)** +- Adds RunTLS() + + +**@LinusU (Linus Unnebäck)** +- Small fixes in README + + +**@loongmxbt (Saint Asky)** +- Fix typo in example + + +**@lucas-clemente (Lucas Clemente)** +- ★ work around path.Join removing trailing slashes from routes + + +**@mattn (Yasuhiro Matsumoto)** +- Improve color logger + + +**@mdigger (Dmitry Sedykh)** +- Fixes Form binding when content-type is x-www-form-urlencoded +- No repeat call c.Writer.Status() in gin.Logger +- Fixes Content-Type for json render + + +**@mirzac (Mirza Ceric)** +- Fix debug printing + + +**@mopemope (Yutaka Matsubara)** +- ★ Adds Godep support (Dependencies Manager) +- Fix variadic parameter in the flexible render API +- Fix Corrupted plain render +- Add Pluggable View Renderer Example + + +**@msemenistyi (Mykyta Semenistyi)** +- update Readme.md. Add code to String method + + +**@msoedov (Sasha Myasoedov)** +- ★ Adds tons of unit tests. + + +**@ngerakines (Nick Gerakines)** +- ★ Improves API, c.GET() doesn't panic +- Adds MustGet() method + + +**@r8k (Rajiv Kilaparti)** +- Fix Port usage in README. + + +**@rayrod2030 (Ray Rodriguez)** +- Fix typo in example + + +**@rns** +- Fix typo in example + + +**@RobAWilkinson (Robert Wilkinson)** +- Add example of forms and params + + +**@rogierlommers (Rogier Lommers)** +- Add updated static serve example + + +**@se77en (Damon Zhao)** +- Improve color logging + + +**@silasb (Silas Baronda)** +- Fixing quotes in README + + +**@SkuliOskarsson (Skuli Oskarsson)** +- Fixes some texts in README II + + +**@slimmy (Jimmy Pettersson)** +- Added messages for required bindings + + +**@smira (Andrey Smirnov)** +- Add support for ignored/unexported fields in binding + + +**@superalsrk (SRK.Lyu)** +- Update httprouter godeps + + +**@tebeka (Miki Tebeka)** +- Use net/http constants instead of numeric values + + +**@techjanitor** +- Update context.go reserved IPs + + +**@yosssi (Keiji Yoshida)** +- Fix link in README + + +**@yuyabee** +- Fixed README diff --git a/vendor/github.com/gin-gonic/gin/BENCHMARKS.md b/vendor/github.com/gin-gonic/gin/BENCHMARKS.md new file mode 100644 index 000000000..9a7df86a3 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/BENCHMARKS.md @@ -0,0 +1,604 @@ + +## Benchmark System + +**VM HOST:** DigitalOcean +**Machine:** 4 CPU, 8 GB RAM. 
Ubuntu 16.04.2 x64 +**Date:** July 19th, 2017 +**Go Version:** 1.8.3 linux/amd64 +**Source:** [Go HTTP Router Benchmark](https://github.com/julienschmidt/go-http-routing-benchmark) + +## Static Routes: 157 + +``` +Gin: 30512 Bytes + +HttpServeMux: 17344 Bytes +Ace: 30080 Bytes +Bear: 30472 Bytes +Beego: 96408 Bytes +Bone: 37904 Bytes +Denco: 10464 Bytes +Echo: 73680 Bytes +GocraftWeb: 55720 Bytes +Goji: 27200 Bytes +Gojiv2: 104464 Bytes +GoJsonRest: 136472 Bytes +GoRestful: 914904 Bytes +GorillaMux: 675568 Bytes +HttpRouter: 21128 Bytes +HttpTreeMux: 73448 Bytes +Kocha: 115072 Bytes +LARS: 30120 Bytes +Macaron: 37984 Bytes +Martini: 310832 Bytes +Pat: 20464 Bytes +Possum: 91328 Bytes +R2router: 23712 Bytes +Rivet: 23880 Bytes +Tango: 28008 Bytes +TigerTonic: 80368 Bytes +Traffic: 626480 Bytes +Vulcan: 369064 Bytes +``` + +## GithubAPI Routes: 203 + +``` +Gin: 52672 Bytes + +Ace: 48992 Bytes +Bear: 161592 Bytes +Beego: 147992 Bytes +Bone: 97728 Bytes +Denco: 36440 Bytes +Echo: 95672 Bytes +GocraftWeb: 95640 Bytes +Goji: 86088 Bytes +Gojiv2: 144392 Bytes +GoJsonRest: 134648 Bytes +GoRestful: 1410760 Bytes +GorillaMux: 1509488 Bytes +HttpRouter: 37464 Bytes +HttpTreeMux: 78800 Bytes +Kocha: 785408 Bytes +LARS: 49032 Bytes +Macaron: 132712 Bytes +Martini: 564352 Bytes +Pat: 21200 Bytes +Possum: 83888 Bytes +R2router: 47104 Bytes +Rivet: 42840 Bytes +Tango: 54584 Bytes +TigerTonic: 96384 Bytes +Traffic: 1061920 Bytes +Vulcan: 465296 Bytes +``` + +## GPlusAPI Routes: 13 + +``` +Gin: 3968 Bytes + +Ace: 3600 Bytes +Bear: 7112 Bytes +Beego: 10048 Bytes +Bone: 6480 Bytes +Denco: 3256 Bytes +Echo: 9000 Bytes +GocraftWeb: 7496 Bytes +Goji: 2912 Bytes +Gojiv2: 7376 Bytes +GoJsonRest: 11544 Bytes +GoRestful: 88776 Bytes +GorillaMux: 71488 Bytes +HttpRouter: 2712 Bytes +HttpTreeMux: 7440 Bytes +Kocha: 128880 Bytes +LARS: 3640 Bytes +Macaron: 8656 Bytes +Martini: 23936 Bytes +Pat: 1856 Bytes +Possum: 7248 Bytes +R2router: 3928 Bytes +Rivet: 3064 Bytes +Tango: 4912 Bytes +TigerTonic: 9408 Bytes +Traffic: 49472 Bytes +Vulcan: 25496 Bytes +``` + +## ParseAPI Routes: 26 + +``` +Gin: 6928 Bytes + +Ace: 6592 Bytes +Bear: 12320 Bytes +Beego: 18960 Bytes +Bone: 11024 Bytes +Denco: 4184 Bytes +Echo: 11168 Bytes +GocraftWeb: 12800 Bytes +Goji: 5232 Bytes +Gojiv2: 14464 Bytes +GoJsonRest: 14216 Bytes +GoRestful: 127368 Bytes +GorillaMux: 123016 Bytes +HttpRouter: 4976 Bytes +HttpTreeMux: 7848 Bytes +Kocha: 181712 Bytes +LARS: 6632 Bytes +Macaron: 13648 Bytes +Martini: 45952 Bytes +Pat: 2560 Bytes +Possum: 9200 Bytes +R2router: 7056 Bytes +Rivet: 5680 Bytes +Tango: 8664 Bytes +TigerTonic: 9840 Bytes +Traffic: 93480 Bytes +Vulcan: 44504 Bytes +``` + +## Static Routes + +``` +BenchmarkGin_StaticAll 50000 34506 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_StaticAll 30000 49657 ns/op 0 B/op 0 allocs/op +BenchmarkHttpServeMux_StaticAll 2000 1183737 ns/op 96 B/op 8 allocs/op +BenchmarkBeego_StaticAll 5000 412621 ns/op 57776 B/op 628 allocs/op +BenchmarkBear_StaticAll 10000 149242 ns/op 20336 B/op 461 allocs/op +BenchmarkBone_StaticAll 10000 118583 ns/op 0 B/op 0 allocs/op +BenchmarkDenco_StaticAll 100000 13247 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_StaticAll 20000 79914 ns/op 5024 B/op 157 allocs/op +BenchmarkGocraftWeb_StaticAll 10000 211823 ns/op 46440 B/op 785 allocs/op +BenchmarkGoji_StaticAll 10000 109390 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_StaticAll 3000 415533 ns/op 145696 B/op 1099 allocs/op +BenchmarkGoJsonRest_StaticAll 5000 364403 ns/op 51653 B/op 1727 allocs/op +BenchmarkGoRestful_StaticAll 500 2578579 ns/op 
314936 B/op 3144 allocs/op +BenchmarkGorillaMux_StaticAll 500 2704856 ns/op 115648 B/op 1578 allocs/op +BenchmarkHttpRouter_StaticAll 100000 18541 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_StaticAll 100000 22332 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_StaticAll 50000 31176 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_StaticAll 50000 40840 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_StaticAll 5000 517656 ns/op 120576 B/op 1413 allocs/op +BenchmarkMartini_StaticAll 300 4462289 ns/op 125442 B/op 1717 allocs/op +BenchmarkPat_StaticAll 500 2157275 ns/op 533904 B/op 11123 allocs/op +BenchmarkPossum_StaticAll 10000 254701 ns/op 65312 B/op 471 allocs/op +BenchmarkR2router_StaticAll 10000 133956 ns/op 22608 B/op 628 allocs/op +BenchmarkRivet_StaticAll 30000 46812 ns/op 0 B/op 0 allocs/op +BenchmarkTango_StaticAll 5000 390613 ns/op 39225 B/op 1256 allocs/op +BenchmarkTigerTonic_StaticAll 20000 88060 ns/op 7504 B/op 157 allocs/op +BenchmarkTraffic_StaticAll 500 2910236 ns/op 729736 B/op 14287 allocs/op +BenchmarkVulcan_StaticAll 5000 277366 ns/op 15386 B/op 471 allocs/op +``` + +## Micro Benchmarks + +``` +BenchmarkGin_Param 20000000 113 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_Param 5000000 375 ns/op 32 B/op 1 allocs/op +BenchmarkBear_Param 1000000 1709 ns/op 456 B/op 5 allocs/op +BenchmarkBeego_Param 1000000 2484 ns/op 368 B/op 4 allocs/op +BenchmarkBone_Param 1000000 2391 ns/op 688 B/op 5 allocs/op +BenchmarkDenco_Param 10000000 240 ns/op 32 B/op 1 allocs/op +BenchmarkEcho_Param 5000000 366 ns/op 32 B/op 1 allocs/op +BenchmarkGocraftWeb_Param 1000000 2343 ns/op 648 B/op 8 allocs/op +BenchmarkGoji_Param 1000000 1197 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_Param 1000000 2771 ns/op 944 B/op 8 allocs/op +BenchmarkGoJsonRest_Param 1000000 2993 ns/op 649 B/op 13 allocs/op +BenchmarkGoRestful_Param 200000 8860 ns/op 2296 B/op 21 allocs/op +BenchmarkGorillaMux_Param 500000 4461 ns/op 1056 B/op 11 allocs/op +BenchmarkHttpRouter_Param 10000000 175 ns/op 32 B/op 1 allocs/op +BenchmarkHttpTreeMux_Param 1000000 1167 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_Param 3000000 429 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_Param 10000000 134 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Param 500000 4635 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_Param 200000 9933 ns/op 1072 B/op 10 allocs/op +BenchmarkPat_Param 1000000 2929 ns/op 648 B/op 12 allocs/op +BenchmarkPossum_Param 1000000 2503 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_Param 1000000 1507 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_Param 5000000 297 ns/op 48 B/op 1 allocs/op +BenchmarkTango_Param 1000000 1862 ns/op 248 B/op 8 allocs/op +BenchmarkTigerTonic_Param 500000 5660 ns/op 992 B/op 17 allocs/op +BenchmarkTraffic_Param 200000 8408 ns/op 1960 B/op 21 allocs/op +BenchmarkVulcan_Param 2000000 963 ns/op 98 B/op 3 allocs/op +BenchmarkAce_Param5 2000000 740 ns/op 160 B/op 1 allocs/op +BenchmarkBear_Param5 1000000 2777 ns/op 501 B/op 5 allocs/op +BenchmarkBeego_Param5 1000000 3740 ns/op 368 B/op 4 allocs/op +BenchmarkBone_Param5 1000000 2950 ns/op 736 B/op 5 allocs/op +BenchmarkDenco_Param5 2000000 644 ns/op 160 B/op 1 allocs/op +BenchmarkEcho_Param5 3000000 558 ns/op 32 B/op 1 allocs/op +BenchmarkGin_Param5 10000000 198 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Param5 500000 3870 ns/op 920 B/op 11 allocs/op +BenchmarkGoji_Param5 1000000 1746 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_Param5 1000000 3214 ns/op 1008 B/op 8 allocs/op +BenchmarkGoJsonRest_Param5 500000 5509 ns/op 1097 B/op 16 allocs/op +BenchmarkGoRestful_Param5 200000 11232 ns/op 
2392 B/op 21 allocs/op +BenchmarkGorillaMux_Param5 300000 7777 ns/op 1184 B/op 11 allocs/op +BenchmarkHttpRouter_Param5 3000000 631 ns/op 160 B/op 1 allocs/op +BenchmarkHttpTreeMux_Param5 1000000 2800 ns/op 576 B/op 6 allocs/op +BenchmarkKocha_Param5 1000000 2053 ns/op 440 B/op 10 allocs/op +BenchmarkLARS_Param5 10000000 232 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Param5 500000 5888 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_Param5 200000 12807 ns/op 1232 B/op 11 allocs/op +BenchmarkPat_Param5 300000 7320 ns/op 964 B/op 32 allocs/op +BenchmarkPossum_Param5 1000000 2495 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_Param5 1000000 1844 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_Param5 2000000 935 ns/op 240 B/op 1 allocs/op +BenchmarkTango_Param5 1000000 2327 ns/op 360 B/op 8 allocs/op +BenchmarkTigerTonic_Param5 100000 18514 ns/op 2551 B/op 43 allocs/op +BenchmarkTraffic_Param5 200000 11997 ns/op 2248 B/op 25 allocs/op +BenchmarkVulcan_Param5 1000000 1333 ns/op 98 B/op 3 allocs/op +BenchmarkAce_Param20 1000000 2031 ns/op 640 B/op 1 allocs/op +BenchmarkBear_Param20 200000 7285 ns/op 1664 B/op 5 allocs/op +BenchmarkBeego_Param20 300000 6224 ns/op 368 B/op 4 allocs/op +BenchmarkBone_Param20 200000 8023 ns/op 1903 B/op 5 allocs/op +BenchmarkDenco_Param20 1000000 2262 ns/op 640 B/op 1 allocs/op +BenchmarkEcho_Param20 1000000 1387 ns/op 32 B/op 1 allocs/op +BenchmarkGin_Param20 3000000 503 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Param20 100000 14408 ns/op 3795 B/op 15 allocs/op +BenchmarkGoji_Param20 500000 5272 ns/op 1247 B/op 2 allocs/op +BenchmarkGojiv2_Param20 1000000 4163 ns/op 1248 B/op 8 allocs/op +BenchmarkGoJsonRest_Param20 100000 17866 ns/op 4485 B/op 20 allocs/op +BenchmarkGoRestful_Param20 100000 21022 ns/op 4724 B/op 23 allocs/op +BenchmarkGorillaMux_Param20 100000 17055 ns/op 3547 B/op 13 allocs/op +BenchmarkHttpRouter_Param20 1000000 1748 ns/op 640 B/op 1 allocs/op +BenchmarkHttpTreeMux_Param20 200000 12246 ns/op 3196 B/op 10 allocs/op +BenchmarkKocha_Param20 300000 6861 ns/op 1808 B/op 27 allocs/op +BenchmarkLARS_Param20 3000000 526 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Param20 100000 13069 ns/op 2906 B/op 12 allocs/op +BenchmarkMartini_Param20 100000 23602 ns/op 3597 B/op 13 allocs/op +BenchmarkPat_Param20 50000 32143 ns/op 4688 B/op 111 allocs/op +BenchmarkPossum_Param20 1000000 2396 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_Param20 200000 8907 ns/op 2283 B/op 7 allocs/op +BenchmarkRivet_Param20 1000000 3280 ns/op 1024 B/op 1 allocs/op +BenchmarkTango_Param20 500000 4640 ns/op 856 B/op 8 allocs/op +BenchmarkTigerTonic_Param20 20000 67581 ns/op 10532 B/op 138 allocs/op +BenchmarkTraffic_Param20 50000 40313 ns/op 7941 B/op 45 allocs/op +BenchmarkVulcan_Param20 1000000 2264 ns/op 98 B/op 3 allocs/op +BenchmarkAce_ParamWrite 3000000 532 ns/op 40 B/op 2 allocs/op +BenchmarkBear_ParamWrite 1000000 1778 ns/op 456 B/op 5 allocs/op +BenchmarkBeego_ParamWrite 1000000 2596 ns/op 376 B/op 5 allocs/op +BenchmarkBone_ParamWrite 1000000 2519 ns/op 688 B/op 5 allocs/op +BenchmarkDenco_ParamWrite 5000000 411 ns/op 32 B/op 1 allocs/op +BenchmarkEcho_ParamWrite 2000000 718 ns/op 40 B/op 2 allocs/op +BenchmarkGin_ParamWrite 5000000 283 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParamWrite 1000000 2561 ns/op 656 B/op 9 allocs/op +BenchmarkGoji_ParamWrite 1000000 1378 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_ParamWrite 1000000 3128 ns/op 976 B/op 10 allocs/op +BenchmarkGoJsonRest_ParamWrite 500000 4446 ns/op 1128 B/op 18 allocs/op +BenchmarkGoRestful_ParamWrite 200000 10291 
ns/op 2304 B/op 22 allocs/op +BenchmarkGorillaMux_ParamWrite 500000 5153 ns/op 1064 B/op 12 allocs/op +BenchmarkHttpRouter_ParamWrite 5000000 263 ns/op 32 B/op 1 allocs/op +BenchmarkHttpTreeMux_ParamWrite 1000000 1351 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_ParamWrite 3000000 538 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_ParamWrite 5000000 316 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParamWrite 500000 5756 ns/op 1160 B/op 14 allocs/op +BenchmarkMartini_ParamWrite 200000 13097 ns/op 1176 B/op 14 allocs/op +BenchmarkPat_ParamWrite 500000 4954 ns/op 1072 B/op 17 allocs/op +BenchmarkPossum_ParamWrite 1000000 2499 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_ParamWrite 1000000 1531 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_ParamWrite 3000000 570 ns/op 112 B/op 2 allocs/op +BenchmarkTango_ParamWrite 2000000 957 ns/op 136 B/op 4 allocs/op +BenchmarkTigerTonic_ParamWrite 200000 7025 ns/op 1424 B/op 23 allocs/op +BenchmarkTraffic_ParamWrite 200000 10112 ns/op 2384 B/op 25 allocs/op +BenchmarkVulcan_ParamWrite 1000000 1006 ns/op 98 B/op 3 allocs/op +``` + +## GitHub + +``` +BenchmarkGin_GithubStatic 10000000 156 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_GithubStatic 5000000 294 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GithubStatic 2000000 893 ns/op 120 B/op 3 allocs/op +BenchmarkBeego_GithubStatic 1000000 2491 ns/op 368 B/op 4 allocs/op +BenchmarkBone_GithubStatic 50000 25300 ns/op 2880 B/op 60 allocs/op +BenchmarkDenco_GithubStatic 20000000 76.0 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_GithubStatic 2000000 516 ns/op 32 B/op 1 allocs/op +BenchmarkGocraftWeb_GithubStatic 1000000 1448 ns/op 296 B/op 5 allocs/op +BenchmarkGoji_GithubStatic 3000000 496 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_GithubStatic 1000000 2941 ns/op 928 B/op 7 allocs/op +BenchmarkGoRestful_GithubStatic 100000 27256 ns/op 3224 B/op 22 allocs/op +BenchmarkGoJsonRest_GithubStatic 1000000 2196 ns/op 329 B/op 11 allocs/op +BenchmarkGorillaMux_GithubStatic 50000 31617 ns/op 736 B/op 10 allocs/op +BenchmarkHttpRouter_GithubStatic 20000000 88.4 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GithubStatic 10000000 134 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_GithubStatic 20000000 113 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_GithubStatic 10000000 195 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GithubStatic 500000 3740 ns/op 768 B/op 9 allocs/op +BenchmarkMartini_GithubStatic 50000 27673 ns/op 768 B/op 9 allocs/op +BenchmarkPat_GithubStatic 100000 19470 ns/op 3648 B/op 76 allocs/op +BenchmarkPossum_GithubStatic 1000000 1729 ns/op 416 B/op 3 allocs/op +BenchmarkR2router_GithubStatic 2000000 879 ns/op 144 B/op 4 allocs/op +BenchmarkRivet_GithubStatic 10000000 231 ns/op 0 B/op 0 allocs/op +BenchmarkTango_GithubStatic 1000000 2325 ns/op 248 B/op 8 allocs/op +BenchmarkTigerTonic_GithubStatic 3000000 610 ns/op 48 B/op 1 allocs/op +BenchmarkTraffic_GithubStatic 20000 62973 ns/op 18904 B/op 148 allocs/op +BenchmarkVulcan_GithubStatic 1000000 1447 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GithubParam 2000000 686 ns/op 96 B/op 1 allocs/op +BenchmarkBear_GithubParam 1000000 2155 ns/op 496 B/op 5 allocs/op +BenchmarkBeego_GithubParam 1000000 2713 ns/op 368 B/op 4 allocs/op +BenchmarkBone_GithubParam 100000 15088 ns/op 1760 B/op 18 allocs/op +BenchmarkDenco_GithubParam 2000000 629 ns/op 128 B/op 1 allocs/op +BenchmarkEcho_GithubParam 2000000 653 ns/op 32 B/op 1 allocs/op +BenchmarkGin_GithubParam 5000000 255 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GithubParam 1000000 3145 ns/op 712 B/op 9 allocs/op +BenchmarkGoji_GithubParam 1000000 1916 ns/op 336 
B/op 2 allocs/op +BenchmarkGojiv2_GithubParam 1000000 3975 ns/op 1024 B/op 10 allocs/op +BenchmarkGoJsonRest_GithubParam 300000 4134 ns/op 713 B/op 14 allocs/op +BenchmarkGoRestful_GithubParam 50000 30782 ns/op 2360 B/op 21 allocs/op +BenchmarkGorillaMux_GithubParam 100000 17148 ns/op 1088 B/op 11 allocs/op +BenchmarkHttpRouter_GithubParam 3000000 523 ns/op 96 B/op 1 allocs/op +BenchmarkHttpTreeMux_GithubParam 1000000 1671 ns/op 384 B/op 4 allocs/op +BenchmarkKocha_GithubParam 1000000 1021 ns/op 128 B/op 5 allocs/op +BenchmarkLARS_GithubParam 5000000 283 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GithubParam 500000 4270 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_GithubParam 100000 21728 ns/op 1152 B/op 11 allocs/op +BenchmarkPat_GithubParam 200000 11208 ns/op 2464 B/op 48 allocs/op +BenchmarkPossum_GithubParam 1000000 2334 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_GithubParam 1000000 1487 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_GithubParam 2000000 782 ns/op 96 B/op 1 allocs/op +BenchmarkTango_GithubParam 1000000 2653 ns/op 344 B/op 8 allocs/op +BenchmarkTigerTonic_GithubParam 300000 14073 ns/op 1440 B/op 24 allocs/op +BenchmarkTraffic_GithubParam 50000 29164 ns/op 5992 B/op 52 allocs/op +BenchmarkVulcan_GithubParam 1000000 2529 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GithubAll 10000 134059 ns/op 13792 B/op 167 allocs/op +BenchmarkBear_GithubAll 5000 534445 ns/op 86448 B/op 943 allocs/op +BenchmarkBeego_GithubAll 3000 592444 ns/op 74705 B/op 812 allocs/op +BenchmarkBone_GithubAll 200 6957308 ns/op 698784 B/op 8453 allocs/op +BenchmarkDenco_GithubAll 10000 158819 ns/op 20224 B/op 167 allocs/op +BenchmarkEcho_GithubAll 10000 154700 ns/op 6496 B/op 203 allocs/op +BenchmarkGin_GithubAll 30000 48375 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GithubAll 3000 570806 ns/op 131656 B/op 1686 allocs/op +BenchmarkGoji_GithubAll 2000 818034 ns/op 56112 B/op 334 allocs/op +BenchmarkGojiv2_GithubAll 2000 1213973 ns/op 274768 B/op 3712 allocs/op +BenchmarkGoJsonRest_GithubAll 2000 785796 ns/op 134371 B/op 2737 allocs/op +BenchmarkGoRestful_GithubAll 300 5238188 ns/op 689672 B/op 4519 allocs/op +BenchmarkGorillaMux_GithubAll 100 10257726 ns/op 211840 B/op 2272 allocs/op +BenchmarkHttpRouter_GithubAll 20000 105414 ns/op 13792 B/op 167 allocs/op +BenchmarkHttpTreeMux_GithubAll 10000 319934 ns/op 65856 B/op 671 allocs/op +BenchmarkKocha_GithubAll 10000 209442 ns/op 23304 B/op 843 allocs/op +BenchmarkLARS_GithubAll 20000 62565 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GithubAll 2000 1161270 ns/op 204194 B/op 2000 allocs/op +BenchmarkMartini_GithubAll 200 9991713 ns/op 226549 B/op 2325 allocs/op +BenchmarkPat_GithubAll 200 5590793 ns/op 1499568 B/op 27435 allocs/op +BenchmarkPossum_GithubAll 10000 319768 ns/op 84448 B/op 609 allocs/op +BenchmarkR2router_GithubAll 10000 305134 ns/op 77328 B/op 979 allocs/op +BenchmarkRivet_GithubAll 10000 132134 ns/op 16272 B/op 167 allocs/op +BenchmarkTango_GithubAll 3000 552754 ns/op 63826 B/op 1618 allocs/op +BenchmarkTigerTonic_GithubAll 1000 1439483 ns/op 239104 B/op 5374 allocs/op +BenchmarkTraffic_GithubAll 100 11383067 ns/op 2659329 B/op 21848 allocs/op +BenchmarkVulcan_GithubAll 5000 394253 ns/op 19894 B/op 609 allocs/op +``` + +## Google+ + +``` +BenchmarkGin_GPlusStatic 10000000 183 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_GPlusStatic 5000000 276 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GPlusStatic 2000000 652 ns/op 104 B/op 3 allocs/op +BenchmarkBeego_GPlusStatic 1000000 2239 ns/op 368 B/op 4 allocs/op +BenchmarkBone_GPlusStatic 5000000 380 ns/op 32 B/op 
1 allocs/op +BenchmarkDenco_GPlusStatic 30000000 45.8 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_GPlusStatic 5000000 338 ns/op 32 B/op 1 allocs/op +BenchmarkGocraftWeb_GPlusStatic 1000000 1158 ns/op 280 B/op 5 allocs/op +BenchmarkGoji_GPlusStatic 5000000 331 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_GPlusStatic 1000000 2106 ns/op 928 B/op 7 allocs/op +BenchmarkGoJsonRest_GPlusStatic 1000000 1626 ns/op 329 B/op 11 allocs/op +BenchmarkGoRestful_GPlusStatic 300000 7598 ns/op 1976 B/op 20 allocs/op +BenchmarkGorillaMux_GPlusStatic 1000000 2629 ns/op 736 B/op 10 allocs/op +BenchmarkHttpRouter_GPlusStatic 30000000 52.5 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GPlusStatic 20000000 85.8 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_GPlusStatic 20000000 89.2 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_GPlusStatic 10000000 162 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlusStatic 500000 3479 ns/op 768 B/op 9 allocs/op +BenchmarkMartini_GPlusStatic 200000 9092 ns/op 768 B/op 9 allocs/op +BenchmarkPat_GPlusStatic 3000000 493 ns/op 96 B/op 2 allocs/op +BenchmarkPossum_GPlusStatic 1000000 1467 ns/op 416 B/op 3 allocs/op +BenchmarkR2router_GPlusStatic 2000000 788 ns/op 144 B/op 4 allocs/op +BenchmarkRivet_GPlusStatic 20000000 114 ns/op 0 B/op 0 allocs/op +BenchmarkTango_GPlusStatic 1000000 1534 ns/op 200 B/op 8 allocs/op +BenchmarkTigerTonic_GPlusStatic 5000000 282 ns/op 32 B/op 1 allocs/op +BenchmarkTraffic_GPlusStatic 500000 3798 ns/op 1192 B/op 15 allocs/op +BenchmarkVulcan_GPlusStatic 2000000 1125 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GPlusParam 3000000 528 ns/op 64 B/op 1 allocs/op +BenchmarkBear_GPlusParam 1000000 1570 ns/op 480 B/op 5 allocs/op +BenchmarkBeego_GPlusParam 1000000 2369 ns/op 368 B/op 4 allocs/op +BenchmarkBone_GPlusParam 1000000 2028 ns/op 688 B/op 5 allocs/op +BenchmarkDenco_GPlusParam 5000000 385 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_GPlusParam 3000000 441 ns/op 32 B/op 1 allocs/op +BenchmarkGin_GPlusParam 10000000 174 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlusParam 1000000 2033 ns/op 648 B/op 8 allocs/op +BenchmarkGoji_GPlusParam 1000000 1399 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_GPlusParam 1000000 2641 ns/op 944 B/op 8 allocs/op +BenchmarkGoJsonRest_GPlusParam 1000000 2824 ns/op 649 B/op 13 allocs/op +BenchmarkGoRestful_GPlusParam 200000 8875 ns/op 2296 B/op 21 allocs/op +BenchmarkGorillaMux_GPlusParam 200000 6291 ns/op 1056 B/op 11 allocs/op +BenchmarkHttpRouter_GPlusParam 5000000 316 ns/op 64 B/op 1 allocs/op +BenchmarkHttpTreeMux_GPlusParam 1000000 1129 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_GPlusParam 3000000 538 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_GPlusParam 10000000 198 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlusParam 500000 3554 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_GPlusParam 200000 9831 ns/op 1072 B/op 10 allocs/op +BenchmarkPat_GPlusParam 1000000 2706 ns/op 688 B/op 12 allocs/op +BenchmarkPossum_GPlusParam 1000000 2297 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_GPlusParam 1000000 1318 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_GPlusParam 5000000 399 ns/op 48 B/op 1 allocs/op +BenchmarkTango_GPlusParam 1000000 2070 ns/op 264 B/op 8 allocs/op +BenchmarkTigerTonic_GPlusParam 500000 4853 ns/op 1056 B/op 17 allocs/op +BenchmarkTraffic_GPlusParam 200000 8278 ns/op 1976 B/op 21 allocs/op +BenchmarkVulcan_GPlusParam 1000000 1243 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GPlus2Params 3000000 549 ns/op 64 B/op 1 allocs/op +BenchmarkBear_GPlus2Params 1000000 2112 ns/op 496 B/op 5 allocs/op +BenchmarkBeego_GPlus2Params 500000 2750 ns/op 368 
B/op 4 allocs/op +BenchmarkBone_GPlus2Params 300000 7032 ns/op 1040 B/op 9 allocs/op +BenchmarkDenco_GPlus2Params 3000000 502 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_GPlus2Params 3000000 641 ns/op 32 B/op 1 allocs/op +BenchmarkGin_GPlus2Params 5000000 250 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlus2Params 1000000 2681 ns/op 712 B/op 9 allocs/op +BenchmarkGoji_GPlus2Params 1000000 1926 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_GPlus2Params 500000 3996 ns/op 1024 B/op 11 allocs/op +BenchmarkGoJsonRest_GPlus2Params 500000 3886 ns/op 713 B/op 14 allocs/op +BenchmarkGoRestful_GPlus2Params 200000 10376 ns/op 2360 B/op 21 allocs/op +BenchmarkGorillaMux_GPlus2Params 100000 14162 ns/op 1088 B/op 11 allocs/op +BenchmarkHttpRouter_GPlus2Params 5000000 336 ns/op 64 B/op 1 allocs/op +BenchmarkHttpTreeMux_GPlus2Params 1000000 1523 ns/op 384 B/op 4 allocs/op +BenchmarkKocha_GPlus2Params 2000000 970 ns/op 128 B/op 5 allocs/op +BenchmarkLARS_GPlus2Params 5000000 238 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlus2Params 500000 4016 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_GPlus2Params 100000 21253 ns/op 1200 B/op 13 allocs/op +BenchmarkPat_GPlus2Params 200000 8632 ns/op 2256 B/op 34 allocs/op +BenchmarkPossum_GPlus2Params 1000000 2171 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_GPlus2Params 1000000 1340 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_GPlus2Params 3000000 557 ns/op 96 B/op 1 allocs/op +BenchmarkTango_GPlus2Params 1000000 2186 ns/op 344 B/op 8 allocs/op +BenchmarkTigerTonic_GPlus2Params 200000 9060 ns/op 1488 B/op 24 allocs/op +BenchmarkTraffic_GPlus2Params 100000 20324 ns/op 3272 B/op 31 allocs/op +BenchmarkVulcan_GPlus2Params 1000000 2039 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GPlusAll 300000 6603 ns/op 640 B/op 11 allocs/op +BenchmarkBear_GPlusAll 100000 22363 ns/op 5488 B/op 61 allocs/op +BenchmarkBeego_GPlusAll 50000 38757 ns/op 4784 B/op 52 allocs/op +BenchmarkBone_GPlusAll 20000 54916 ns/op 10336 B/op 98 allocs/op +BenchmarkDenco_GPlusAll 300000 4959 ns/op 672 B/op 11 allocs/op +BenchmarkEcho_GPlusAll 200000 6558 ns/op 416 B/op 13 allocs/op +BenchmarkGin_GPlusAll 500000 2757 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlusAll 50000 34615 ns/op 8040 B/op 103 allocs/op +BenchmarkGoji_GPlusAll 100000 16002 ns/op 3696 B/op 22 allocs/op +BenchmarkGojiv2_GPlusAll 50000 35060 ns/op 12624 B/op 115 allocs/op +BenchmarkGoJsonRest_GPlusAll 50000 41479 ns/op 8117 B/op 170 allocs/op +BenchmarkGoRestful_GPlusAll 10000 131653 ns/op 32024 B/op 275 allocs/op +BenchmarkGorillaMux_GPlusAll 10000 101380 ns/op 13296 B/op 142 allocs/op +BenchmarkHttpRouter_GPlusAll 500000 3711 ns/op 640 B/op 11 allocs/op +BenchmarkHttpTreeMux_GPlusAll 100000 14438 ns/op 4032 B/op 38 allocs/op +BenchmarkKocha_GPlusAll 200000 8039 ns/op 976 B/op 43 allocs/op +BenchmarkLARS_GPlusAll 500000 2630 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlusAll 30000 51123 ns/op 13152 B/op 128 allocs/op +BenchmarkMartini_GPlusAll 10000 176157 ns/op 14016 B/op 145 allocs/op +BenchmarkPat_GPlusAll 20000 69911 ns/op 16576 B/op 298 allocs/op +BenchmarkPossum_GPlusAll 100000 20716 ns/op 5408 B/op 39 allocs/op +BenchmarkR2router_GPlusAll 100000 17463 ns/op 5040 B/op 63 allocs/op +BenchmarkRivet_GPlusAll 300000 5142 ns/op 768 B/op 11 allocs/op +BenchmarkTango_GPlusAll 50000 27321 ns/op 3656 B/op 104 allocs/op +BenchmarkTigerTonic_GPlusAll 20000 77597 ns/op 14512 B/op 288 allocs/op +BenchmarkTraffic_GPlusAll 10000 151406 ns/op 37360 B/op 392 allocs/op +BenchmarkVulcan_GPlusAll 100000 18555 ns/op 1274 B/op 39 allocs/op +``` + +## 
Parse.com + +``` +BenchmarkGin_ParseStatic 10000000 133 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_ParseStatic 5000000 241 ns/op 0 B/op 0 allocs/op +BenchmarkBear_ParseStatic 2000000 728 ns/op 120 B/op 3 allocs/op +BenchmarkBeego_ParseStatic 1000000 2623 ns/op 368 B/op 4 allocs/op +BenchmarkBone_ParseStatic 1000000 1285 ns/op 144 B/op 3 allocs/op +BenchmarkDenco_ParseStatic 30000000 57.8 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_ParseStatic 5000000 342 ns/op 32 B/op 1 allocs/op +BenchmarkGocraftWeb_ParseStatic 1000000 1478 ns/op 296 B/op 5 allocs/op +BenchmarkGoji_ParseStatic 3000000 415 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_ParseStatic 1000000 2087 ns/op 928 B/op 7 allocs/op +BenchmarkGoJsonRest_ParseStatic 1000000 1712 ns/op 329 B/op 11 allocs/op +BenchmarkGoRestful_ParseStatic 200000 11072 ns/op 3224 B/op 22 allocs/op +BenchmarkGorillaMux_ParseStatic 500000 4129 ns/op 752 B/op 11 allocs/op +BenchmarkHttpRouter_ParseStatic 30000000 52.4 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_ParseStatic 20000000 109 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_ParseStatic 20000000 81.8 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_ParseStatic 10000000 150 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParseStatic 1000000 3288 ns/op 768 B/op 9 allocs/op +BenchmarkMartini_ParseStatic 200000 9110 ns/op 768 B/op 9 allocs/op +BenchmarkPat_ParseStatic 1000000 1135 ns/op 240 B/op 5 allocs/op +BenchmarkPossum_ParseStatic 1000000 1557 ns/op 416 B/op 3 allocs/op +BenchmarkR2router_ParseStatic 2000000 730 ns/op 144 B/op 4 allocs/op +BenchmarkRivet_ParseStatic 10000000 121 ns/op 0 B/op 0 allocs/op +BenchmarkTango_ParseStatic 1000000 1688 ns/op 248 B/op 8 allocs/op +BenchmarkTigerTonic_ParseStatic 3000000 427 ns/op 48 B/op 1 allocs/op +BenchmarkTraffic_ParseStatic 500000 5962 ns/op 1816 B/op 20 allocs/op +BenchmarkVulcan_ParseStatic 2000000 969 ns/op 98 B/op 3 allocs/op +BenchmarkAce_ParseParam 3000000 497 ns/op 64 B/op 1 allocs/op +BenchmarkBear_ParseParam 1000000 1473 ns/op 467 B/op 5 allocs/op +BenchmarkBeego_ParseParam 1000000 2384 ns/op 368 B/op 4 allocs/op +BenchmarkBone_ParseParam 1000000 2513 ns/op 768 B/op 6 allocs/op +BenchmarkDenco_ParseParam 5000000 364 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_ParseParam 5000000 418 ns/op 32 B/op 1 allocs/op +BenchmarkGin_ParseParam 10000000 163 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParseParam 1000000 2361 ns/op 664 B/op 8 allocs/op +BenchmarkGoji_ParseParam 1000000 1590 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_ParseParam 1000000 2851 ns/op 976 B/op 9 allocs/op +BenchmarkGoJsonRest_ParseParam 1000000 2965 ns/op 649 B/op 13 allocs/op +BenchmarkGoRestful_ParseParam 200000 12207 ns/op 3544 B/op 23 allocs/op +BenchmarkGorillaMux_ParseParam 500000 5187 ns/op 1088 B/op 12 allocs/op +BenchmarkHttpRouter_ParseParam 5000000 275 ns/op 64 B/op 1 allocs/op +BenchmarkHttpTreeMux_ParseParam 1000000 1108 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_ParseParam 3000000 495 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_ParseParam 10000000 192 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParseParam 500000 4103 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_ParseParam 200000 9878 ns/op 1072 B/op 10 allocs/op +BenchmarkPat_ParseParam 500000 3657 ns/op 1120 B/op 17 allocs/op +BenchmarkPossum_ParseParam 1000000 2084 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_ParseParam 1000000 1251 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_ParseParam 5000000 335 ns/op 48 B/op 1 allocs/op +BenchmarkTango_ParseParam 1000000 1854 ns/op 280 B/op 8 allocs/op +BenchmarkTigerTonic_ParseParam 500000 4582 ns/op 1008 
B/op 17 allocs/op +BenchmarkTraffic_ParseParam 200000 8125 ns/op 2248 B/op 23 allocs/op +BenchmarkVulcan_ParseParam 1000000 1148 ns/op 98 B/op 3 allocs/op +BenchmarkAce_Parse2Params 3000000 539 ns/op 64 B/op 1 allocs/op +BenchmarkBear_Parse2Params 1000000 1778 ns/op 496 B/op 5 allocs/op +BenchmarkBeego_Parse2Params 1000000 2519 ns/op 368 B/op 4 allocs/op +BenchmarkBone_Parse2Params 1000000 2596 ns/op 720 B/op 5 allocs/op +BenchmarkDenco_Parse2Params 3000000 492 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_Parse2Params 3000000 484 ns/op 32 B/op 1 allocs/op +BenchmarkGin_Parse2Params 10000000 193 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Parse2Params 1000000 2575 ns/op 712 B/op 9 allocs/op +BenchmarkGoji_Parse2Params 1000000 1373 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_Parse2Params 500000 2416 ns/op 960 B/op 8 allocs/op +BenchmarkGoJsonRest_Parse2Params 300000 3452 ns/op 713 B/op 14 allocs/op +BenchmarkGoRestful_Parse2Params 100000 17719 ns/op 6008 B/op 25 allocs/op +BenchmarkGorillaMux_Parse2Params 300000 5102 ns/op 1088 B/op 11 allocs/op +BenchmarkHttpRouter_Parse2Params 5000000 303 ns/op 64 B/op 1 allocs/op +BenchmarkHttpTreeMux_Parse2Params 1000000 1372 ns/op 384 B/op 4 allocs/op +BenchmarkKocha_Parse2Params 2000000 874 ns/op 128 B/op 5 allocs/op +BenchmarkLARS_Parse2Params 10000000 192 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Parse2Params 500000 3871 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_Parse2Params 200000 9954 ns/op 1152 B/op 11 allocs/op +BenchmarkPat_Parse2Params 500000 4194 ns/op 832 B/op 17 allocs/op +BenchmarkPossum_Parse2Params 1000000 2121 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_Parse2Params 1000000 1415 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_Parse2Params 3000000 457 ns/op 96 B/op 1 allocs/op +BenchmarkTango_Parse2Params 1000000 1914 ns/op 312 B/op 8 allocs/op +BenchmarkTigerTonic_Parse2Params 300000 6895 ns/op 1408 B/op 24 allocs/op +BenchmarkTraffic_Parse2Params 200000 8317 ns/op 2040 B/op 22 allocs/op +BenchmarkVulcan_Parse2Params 1000000 1274 ns/op 98 B/op 3 allocs/op +BenchmarkAce_ParseAll 200000 10401 ns/op 640 B/op 16 allocs/op +BenchmarkBear_ParseAll 50000 37743 ns/op 8928 B/op 110 allocs/op +BenchmarkBeego_ParseAll 20000 63193 ns/op 9568 B/op 104 allocs/op +BenchmarkBone_ParseAll 20000 61767 ns/op 14160 B/op 131 allocs/op +BenchmarkDenco_ParseAll 300000 7036 ns/op 928 B/op 16 allocs/op +BenchmarkEcho_ParseAll 200000 11824 ns/op 832 B/op 26 allocs/op +BenchmarkGin_ParseAll 300000 4199 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParseAll 30000 51758 ns/op 13728 B/op 181 allocs/op +BenchmarkGoji_ParseAll 50000 29614 ns/op 5376 B/op 32 allocs/op +BenchmarkGojiv2_ParseAll 20000 68676 ns/op 24464 B/op 199 allocs/op +BenchmarkGoJsonRest_ParseAll 20000 76135 ns/op 13866 B/op 321 allocs/op +BenchmarkGoRestful_ParseAll 5000 389487 ns/op 110928 B/op 600 allocs/op +BenchmarkGorillaMux_ParseAll 10000 221250 ns/op 24864 B/op 292 allocs/op +BenchmarkHttpRouter_ParseAll 200000 6444 ns/op 640 B/op 16 allocs/op +BenchmarkHttpTreeMux_ParseAll 50000 30702 ns/op 5728 B/op 51 allocs/op +BenchmarkKocha_ParseAll 200000 13712 ns/op 1112 B/op 54 allocs/op +BenchmarkLARS_ParseAll 300000 6925 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParseAll 20000 96278 ns/op 24576 B/op 250 allocs/op +BenchmarkMartini_ParseAll 5000 271352 ns/op 25072 B/op 253 allocs/op +BenchmarkPat_ParseAll 20000 74941 ns/op 17264 B/op 343 allocs/op +BenchmarkPossum_ParseAll 50000 39947 ns/op 10816 B/op 78 allocs/op +BenchmarkR2router_ParseAll 50000 42479 ns/op 8352 B/op 120 allocs/op 
+BenchmarkRivet_ParseAll 200000 7726 ns/op 912 B/op 16 allocs/op +BenchmarkTango_ParseAll 30000 50014 ns/op 7168 B/op 208 allocs/op +BenchmarkTigerTonic_ParseAll 10000 106550 ns/op 19728 B/op 379 allocs/op +BenchmarkTraffic_ParseAll 10000 216037 ns/op 57776 B/op 642 allocs/op +BenchmarkVulcan_ParseAll 50000 34379 ns/op 2548 B/op 78 allocs/op +``` diff --git a/vendor/github.com/gin-gonic/gin/CHANGELOG.md b/vendor/github.com/gin-gonic/gin/CHANGELOG.md new file mode 100644 index 000000000..ee485ec3b --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/CHANGELOG.md @@ -0,0 +1,191 @@ +# CHANGELOG + +### Gin 1.2 + +- [NEW] Switch from godeps to govendor +- [NEW] Add support for Let's Encrypt via gin-gonic/autotls +- [NEW] Improve README examples and add extra at examples folder +- [NEW] Improved support with App Engine +- [NEW] Add custom template delimiters, see #860 +- [NEW] Add Template Func Maps, see #962 +- [NEW] Add \*context.Handler(), see #928 +- [NEW] Add \*context.GetRawData() +- [NEW] Add \*context.GetHeader() (request) +- [NEW] Add \*context.AbortWithStatusJSON() (JSON content type) +- [NEW] Add \*context.Keys type cast helpers +- [NEW] Add \*context.ShouldBindWith() +- [NEW] Add \*context.MustBindWith() +- [NEW] Add \*engine.SetFuncMap() +- [DEPRECATE] On next release: \*context.BindWith(), see #855 +- [FIX] Refactor render +- [FIX] Reworked tests +- [FIX] logger now supports cygwin +- [FIX] Use X-Forwarded-For before X-Real-Ip +- [FIX] time.Time binding (#904) + +### Gin 1.1.4 + +- [NEW] Support google appengine for IsTerminal func + +### Gin 1.1.3 + +- [FIX] Reverted Logger: skip ANSI color commands + +### Gin 1.1 + +- [NEW] Implement QueryArray and PostArray methods +- [NEW] Refactor GetQuery and GetPostForm +- [NEW] Add contribution guide +- [FIX] Corrected typos in README +- [FIX] Removed additional Iota +- [FIX] Changed imports to gopkg instead of github in README (#733) +- [FIX] Logger: skip ANSI color commands if output is not a tty + +### Gin 1.0rc2 (...) + +- [PERFORMANCE] Fast path for writing Content-Type. +- [PERFORMANCE] Much faster 404 routing +- [PERFORMANCE] Allocation optimizations +- [PERFORMANCE] Faster root tree lookup +- [PERFORMANCE] Zero overhead, String() and JSON() rendering. +- [PERFORMANCE] Faster ClientIP parsing +- [PERFORMANCE] Much faster SSE implementation +- [NEW] Benchmarks suite +- [NEW] Bind validation can be disabled and replaced with custom validators. +- [NEW] More flexible HTML render +- [NEW] Multipart and PostForm bindings +- [NEW] Adds method to return all the registered routes +- [NEW] Context.HandlerName() returns the main handler's name +- [NEW] Adds Error.IsType() helper +- [FIX] Binding multipart form +- [FIX] Integration tests +- [FIX] Crash when binding non struct object in Context. +- [FIX] RunTLS() implementation +- [FIX] Logger() unit tests +- [FIX] Adds SetHTMLTemplate() warning +- [FIX] Context.IsAborted() +- [FIX] More unit tests +- [FIX] JSON, XML, HTML renders accept custom content-types +- [FIX] gin.AbortIndex is unexported +- [FIX] Better approach to avoid directory listing in StaticFS() +- [FIX] Context.ClientIP() always returns the IP with trimmed spaces. +- [FIX] Better warning when running in debug mode. +- [FIX] Google App Engine integration. 
debugPrint does not use os.Stdout
+- [FIX] Fixes integer overflow in error type
+- [FIX] Error implements the json.Marshaller interface
+- [FIX] MIT license in every file
+
+
+### Gin 1.0rc1 (May 22, 2015)
+
+- [PERFORMANCE] Zero allocation router
+- [PERFORMANCE] Faster JSON, XML and text rendering
+- [PERFORMANCE] Custom hand optimized HttpRouter for Gin
+- [PERFORMANCE] Misc code optimizations. Inlining, tail call optimizations
+- [NEW] Built-in support for golang.org/x/net/context
+- [NEW] Any(path, handler). Create a route that matches any path
+- [NEW] Refactored rendering pipeline (faster and statically typed)
+- [NEW] Refactored errors API
+- [NEW] IndentedJSON() prints pretty JSON
+- [NEW] Added gin.DefaultWriter
+- [NEW] UNIX socket support
+- [NEW] RouterGroup.BasePath is exposed
+- [NEW] JSON validation using go-validate-yourself (very powerful options)
+- [NEW] Completed suite of unit tests
+- [NEW] HTTP streaming with c.Stream()
+- [NEW] StaticFile() creates a router for serving just one file.
+- [NEW] StaticFS() has an option to disable directory listing.
+- [NEW] StaticFS() for serving static files through virtual filesystems
+- [NEW] Server-Sent Events native support
+- [NEW] WrapF() and WrapH() helpers for wrapping http.HandlerFunc and http.Handler
+- [NEW] Added LoggerWithWriter() middleware
+- [NEW] Added RecoveryWithWriter() middleware
+- [NEW] Added DefaultPostFormValue()
+- [NEW] Added DefaultFormValue()
+- [NEW] Added DefaultParamValue()
+- [FIX] BasicAuth() when using custom realm
+- [FIX] Bug when serving static files in nested routing group
+- [FIX] Redirect using built-in http.Redirect()
+- [FIX] Logger when printing the requested path
+- [FIX] Documentation typos
+- [FIX] Context.Engine renamed to Context.engine
+- [FIX] Better debugging messages
+- [FIX] ErrorLogger
+- [FIX] Debug HTTP render
+- [FIX] Refactored binding and render modules
+- [FIX] Refactored Context initialization
+- [FIX] Refactored BasicAuth()
+- [FIX] NoMethod/NoRoute handlers
+- [FIX] Hijacking http
+- [FIX] Better support for Google App Engine (using log instead of fmt)
+
+
+### Gin 0.6 (Mar 9, 2015)
+
+- [NEW] Support multipart/form-data
+- [NEW] NoMethod handler
+- [NEW] Validate sub structures
+- [NEW] Support for HTTP Realm Auth
+- [FIX] Unsigned integers in binding
+- [FIX] Improve color logger
+
+
+### Gin 0.5 (Feb 7, 2015)
+
+- [NEW] Content Negotiation
+- [FIX] Solved security bug that allowed a client to spoof ip
+- [FIX] Fix unexported/ignored fields in binding
+
+
+### Gin 0.4 (Aug 21, 2014)
+
+- [NEW] Development mode
+- [NEW] Unit tests
+- [NEW] Add Content.Redirect()
+- [FIX] Deferring WriteHeader()
+- [FIX] Improved documentation for model binding
+
+
+### Gin 0.3 (Jul 18, 2014)
+
+- [PERFORMANCE] Normal log and error log are printed in the same call.
+- [PERFORMANCE] Improve performance of NoRouter()
+- [PERFORMANCE] Improve context's memory locality, reduce CPU cache faults.
+- [NEW] Flexible rendering API
+- [NEW] Add Context.File()
+- [NEW] Add shortcut RunTLS() for http.ListenAndServeTLS
+- [FIX] Rename NotFound404() to NoRoute()
+- [FIX] Errors in context are purged
+- [FIX] Adds HEAD method in Static file serving
+- [FIX] Refactors Static() file serving
+- [FIX] Using keyed initialization to fix app-engine integration
+- [FIX] Can't unmarshal JSON array, #63
+- [FIX] Renaming Context.Req to Context.Request
+- [FIX] Check application/x-www-form-urlencoded when parsing form
+
+
+### Gin 0.2b (Jul 08, 2014)
+- [PERFORMANCE] Using sync.Pool to reduce allocation/gc overhead
+- [NEW] Travis CI integration
+- [NEW] Completely new logger
+- [NEW] New API for serving static files. gin.Static()
+- [NEW] gin.H() can be serialized into XML
+- [NEW] Typed errors. Errors can be typed. Internet/external/custom.
+- [NEW] Support for Godeps
+- [NEW] Travis/Godocs badges in README
+- [NEW] New Bind() and BindWith() methods for parsing request body.
+- [NEW] Add Content.Copy()
+- [NEW] Add context.LastError()
+- [NEW] Add shortcut for OPTIONS HTTP method
+- [FIX] Tons of README fixes
+- [FIX] Header is written before body
+- [FIX] BasicAuth() and changes API a little bit
+- [FIX] Recovery() middleware only prints panics
+- [FIX] Context.Get() does not panic anymore. Use MustGet() instead.
+- [FIX] Multiple http.WriteHeader() in NotFound handlers
+- [FIX] Engine.Run() panics if http server can't be set up
+- [FIX] Crash when route path doesn't start with '/'
+- [FIX] Do not update header when status code is negative
+- [FIX] Setting response headers before calling WriteHeader in context.String()
+- [FIX] Add MIT license
+- [FIX] Changes behaviour of ErrorLogger() and Logger()
diff --git a/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md b/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..4ea14f395
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+ +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at teamgingonic@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md b/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md new file mode 100644 index 000000000..547b777a1 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md @@ -0,0 +1,13 @@ +## Contributing + +- With issues: + - Use the search tool before opening a new issue. + - Please provide source code and commit sha if you found a bug. + - Review existing issues and provide feedback or react to them. + +- With pull requests: + - Open your pull request against `master` + - Your pull request should have no more than two commits, if not you should squash them. + - It should pass all tests in the available continuous integrations systems such as TravisCI. + - You should add/modify tests to cover your proposed code changes. + - If your pull request contains a new feature, please document it on the README. diff --git a/vendor/github.com/gin-gonic/gin/LICENSE b/vendor/github.com/gin-gonic/gin/LICENSE new file mode 100644 index 000000000..1ff7f3706 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Manuel Martínez-Almeida + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/gin-gonic/gin/Makefile b/vendor/github.com/gin-gonic/gin/Makefile new file mode 100644 index 000000000..5468563a0 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/Makefile @@ -0,0 +1,61 @@ +GOFMT ?= gofmt "-s" +PACKAGES ?= $(shell go list ./... | grep -v /vendor/) +GOFILES := $(shell find . -name "*.go" -type f -not -path "./vendor/*") + +all: install + +install: deps + govendor sync + +.PHONY: test +test: + sh coverage.sh + +.PHONY: fmt +fmt: + $(GOFMT) -w $(GOFILES) + +.PHONY: fmt-check +fmt-check: + # get all go files and run go fmt on them + @diff=$$($(GOFMT) -d $(GOFILES)); \ + if [ -n "$$diff" ]; then \ + echo "Please run 'make fmt' and commit the result:"; \ + echo "$${diff}"; \ + exit 1; \ + fi; + +vet: + go vet $(PACKAGES) + +deps: + @hash govendor > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + go get -u github.com/kardianos/govendor; \ + fi + @hash embedmd > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + go get -u github.com/campoy/embedmd; \ + fi + +embedmd: + embedmd -d *.md + +.PHONY: lint +lint: + @hash golint > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + go get -u github.com/golang/lint/golint; \ + fi + for PKG in $(PACKAGES); do golint -set_exit_status $$PKG || exit 1; done; + +.PHONY: misspell-check +misspell-check: + @hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + go get -u github.com/client9/misspell/cmd/misspell; \ + fi + misspell -error $(GOFILES) + +.PHONY: misspell +misspell: + @hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + go get -u github.com/client9/misspell/cmd/misspell; \ + fi + misspell -w $(GOFILES) diff --git a/vendor/github.com/gin-gonic/gin/README.md b/vendor/github.com/gin-gonic/gin/README.md new file mode 100644 index 000000000..b51aa10c5 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/README.md @@ -0,0 +1,1399 @@ +# Gin Web Framework + + + +[![Build Status](https://travis-ci.org/gin-gonic/gin.svg)](https://travis-ci.org/gin-gonic/gin) + [![codecov](https://codecov.io/gh/gin-gonic/gin/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-gonic/gin) + [![Go Report Card](https://goreportcard.com/badge/github.com/gin-gonic/gin)](https://goreportcard.com/report/github.com/gin-gonic/gin) + [![GoDoc](https://godoc.org/github.com/gin-gonic/gin?status.svg)](https://godoc.org/github.com/gin-gonic/gin) + [![Join the chat at https://gitter.im/gin-gonic/gin](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gin-gonic/gin?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Open Source Helpers](https://www.codetriage.com/gin-gonic/gin/badges/users.svg)](https://www.codetriage.com/gin-gonic/gin) + +Gin is a web framework written in Go (Golang). It features a martini-like API with much better performance, up to 40 times faster thanks to [httprouter](https://github.com/julienschmidt/httprouter). If you need performance and good productivity, you will love Gin. 
+ +![Gin console logger](https://gin-gonic.github.io/gin/other/console.png) + +```sh +# assume the following codes in example.go file +$ cat example.go +``` + +```go +package main + +import "github.com/gin-gonic/gin" + +func main() { + r := gin.Default() + r.GET("/ping", func(c *gin.Context) { + c.JSON(200, gin.H{ + "message": "pong", + }) + }) + r.Run() // listen and serve on 0.0.0.0:8080 +} +``` + +``` +# run example.go and visit 0.0.0.0:8080/ping on browser +$ go run example.go +``` + +## Benchmarks + +Gin uses a custom version of [HttpRouter](https://github.com/julienschmidt/httprouter) + +[See all benchmarks](/BENCHMARKS.md) + +Benchmark name | (1) | (2) | (3) | (4) +--------------------------------------------|-----------:|------------:|-----------:|---------: +**BenchmarkGin_GithubAll** | **30000** | **48375** | **0** | **0** +BenchmarkAce_GithubAll | 10000 | 134059 | 13792 | 167 +BenchmarkBear_GithubAll | 5000 | 534445 | 86448 | 943 +BenchmarkBeego_GithubAll | 3000 | 592444 | 74705 | 812 +BenchmarkBone_GithubAll | 200 | 6957308 | 698784 | 8453 +BenchmarkDenco_GithubAll | 10000 | 158819 | 20224 | 167 +BenchmarkEcho_GithubAll | 10000 | 154700 | 6496 | 203 +BenchmarkGocraftWeb_GithubAll | 3000 | 570806 | 131656 | 1686 +BenchmarkGoji_GithubAll | 2000 | 818034 | 56112 | 334 +BenchmarkGojiv2_GithubAll | 2000 | 1213973 | 274768 | 3712 +BenchmarkGoJsonRest_GithubAll | 2000 | 785796 | 134371 | 2737 +BenchmarkGoRestful_GithubAll | 300 | 5238188 | 689672 | 4519 +BenchmarkGorillaMux_GithubAll | 100 | 10257726 | 211840 | 2272 +BenchmarkHttpRouter_GithubAll | 20000 | 105414 | 13792 | 167 +BenchmarkHttpTreeMux_GithubAll | 10000 | 319934 | 65856 | 671 +BenchmarkKocha_GithubAll | 10000 | 209442 | 23304 | 843 +BenchmarkLARS_GithubAll | 20000 | 62565 | 0 | 0 +BenchmarkMacaron_GithubAll | 2000 | 1161270 | 204194 | 2000 +BenchmarkMartini_GithubAll | 200 | 9991713 | 226549 | 2325 +BenchmarkPat_GithubAll | 200 | 5590793 | 1499568 | 27435 +BenchmarkPossum_GithubAll | 10000 | 319768 | 84448 | 609 +BenchmarkR2router_GithubAll | 10000 | 305134 | 77328 | 979 +BenchmarkRivet_GithubAll | 10000 | 132134 | 16272 | 167 +BenchmarkTango_GithubAll | 3000 | 552754 | 63826 | 1618 +BenchmarkTigerTonic_GithubAll | 1000 | 1439483 | 239104 | 5374 +BenchmarkTraffic_GithubAll | 100 | 11383067 | 2659329 | 21848 +BenchmarkVulcan_GithubAll | 5000 | 394253 | 19894 | 609 + +- (1): Total Repetitions achieved in constant time, higher means more confident result +- (2): Single Repetition Duration (ns/op), lower is better +- (3): Heap Memory (B/op), lower is better +- (4): Average Allocations per Repetition (allocs/op), lower is better + +## Gin v1. stable + +- [x] Zero allocation router. +- [x] Still the fastest http router and framework. From routing to writing. +- [x] Complete suite of unit tests +- [x] Battle tested +- [x] API frozen, new releases will not break your code. + +## Start using it + +1. Download and install it: + +```sh +$ go get github.com/gin-gonic/gin +``` + +2. Import it in your code: + +```go +import "github.com/gin-gonic/gin" +``` + +3. (Optional) Import `net/http`. This is required for example if using constants such as `http.StatusOK`. + +```go +import "net/http" +``` + +### Use a vendor tool like [Govendor](https://github.com/kardianos/govendor) + +1. `go get` govendor + +```sh +$ go get github.com/kardianos/govendor +``` +2. Create your project folder and `cd` inside + +```sh +$ mkdir -p $GOPATH/src/github.com/myusername/project && cd "$_" +``` + +3. 
Vendor init your project and add gin + +```sh +$ govendor init +$ govendor fetch github.com/gin-gonic/gin@v1.2 +``` + +4. Copy a starting template inside your project + +```sh +$ curl https://raw.githubusercontent.com/gin-gonic/gin/master/examples/basic/main.go > main.go +``` + +5. Run your project + +```sh +$ go run main.go +``` + +## Build with [jsoniter](https://github.com/json-iterator/go) + +Gin use `encoding/json` as default json package but you can change to [jsoniter](https://github.com/json-iterator/go) by build from other tags. + +```sh +$ go build -tags=jsoniter . +``` + +## API Examples + +### Using GET, POST, PUT, PATCH, DELETE and OPTIONS + +```go +func main() { + // Disable Console Color + // gin.DisableConsoleColor() + + // Creates a gin router with default middleware: + // logger and recovery (crash-free) middleware + router := gin.Default() + + router.GET("/someGet", getting) + router.POST("/somePost", posting) + router.PUT("/somePut", putting) + router.DELETE("/someDelete", deleting) + router.PATCH("/somePatch", patching) + router.HEAD("/someHead", head) + router.OPTIONS("/someOptions", options) + + // By default it serves on :8080 unless a + // PORT environment variable was defined. + router.Run() + // router.Run(":3000") for a hard coded port +} +``` + +### Parameters in path + +```go +func main() { + router := gin.Default() + + // This handler will match /user/john but will not match neither /user/ or /user + router.GET("/user/:name", func(c *gin.Context) { + name := c.Param("name") + c.String(http.StatusOK, "Hello %s", name) + }) + + // However, this one will match /user/john/ and also /user/john/send + // If no other routers match /user/john, it will redirect to /user/john/ + router.GET("/user/:name/*action", func(c *gin.Context) { + name := c.Param("name") + action := c.Param("action") + message := name + " is " + action + c.String(http.StatusOK, message) + }) + + router.Run(":8080") +} +``` + +### Querystring parameters + +```go +func main() { + router := gin.Default() + + // Query string parameters are parsed using the existing underlying request object. 
+ // The request responds to a url matching: /welcome?firstname=Jane&lastname=Doe + router.GET("/welcome", func(c *gin.Context) { + firstname := c.DefaultQuery("firstname", "Guest") + lastname := c.Query("lastname") // shortcut for c.Request.URL.Query().Get("lastname") + + c.String(http.StatusOK, "Hello %s %s", firstname, lastname) + }) + router.Run(":8080") +} +``` + +### Multipart/Urlencoded Form + +```go +func main() { + router := gin.Default() + + router.POST("/form_post", func(c *gin.Context) { + message := c.PostForm("message") + nick := c.DefaultPostForm("nick", "anonymous") + + c.JSON(200, gin.H{ + "status": "posted", + "message": message, + "nick": nick, + }) + }) + router.Run(":8080") +} +``` + +### Another example: query + post form + +``` +POST /post?id=1234&page=1 HTTP/1.1 +Content-Type: application/x-www-form-urlencoded + +name=manu&message=this_is_great +``` + +```go +func main() { + router := gin.Default() + + router.POST("/post", func(c *gin.Context) { + + id := c.Query("id") + page := c.DefaultQuery("page", "0") + name := c.PostForm("name") + message := c.PostForm("message") + + fmt.Printf("id: %s; page: %s; name: %s; message: %s", id, page, name, message) + }) + router.Run(":8080") +} +``` + +``` +id: 1234; page: 1; name: manu; message: this_is_great +``` + +### Upload files + +#### Single file + +References issue [#774](https://github.com/gin-gonic/gin/issues/774) and detail [example code](examples/upload-file/single). + +```go +func main() { + router := gin.Default() + // Set a lower memory limit for multipart forms (default is 32 MiB) + // router.MaxMultipartMemory = 8 << 20 // 8 MiB + router.POST("/upload", func(c *gin.Context) { + // single file + file, _ := c.FormFile("file") + log.Println(file.Filename) + + // Upload the file to specific dst. + // c.SaveUploadedFile(file, dst) + + c.String(http.StatusOK, fmt.Sprintf("'%s' uploaded!", file.Filename)) + }) + router.Run(":8080") +} +``` + +How to `curl`: + +```bash +curl -X POST http://localhost:8080/upload \ + -F "file=@/Users/appleboy/test.zip" \ + -H "Content-Type: multipart/form-data" +``` + +#### Multiple files + +See the detail [example code](examples/upload-file/multiple). + +```go +func main() { + router := gin.Default() + // Set a lower memory limit for multipart forms (default is 32 MiB) + // router.MaxMultipartMemory = 8 << 20 // 8 MiB + router.POST("/upload", func(c *gin.Context) { + // Multipart form + form, _ := c.MultipartForm() + files := form.File["upload[]"] + + for _, file := range files { + log.Println(file.Filename) + + // Upload the file to specific dst. 
+ // c.SaveUploadedFile(file, dst) + } + c.String(http.StatusOK, fmt.Sprintf("%d files uploaded!", len(files))) + }) + router.Run(":8080") +} +``` + +How to `curl`: + +```bash +curl -X POST http://localhost:8080/upload \ + -F "upload[]=@/Users/appleboy/test1.zip" \ + -F "upload[]=@/Users/appleboy/test2.zip" \ + -H "Content-Type: multipart/form-data" +``` + +### Grouping routes + +```go +func main() { + router := gin.Default() + + // Simple group: v1 + v1 := router.Group("/v1") + { + v1.POST("/login", loginEndpoint) + v1.POST("/submit", submitEndpoint) + v1.POST("/read", readEndpoint) + } + + // Simple group: v2 + v2 := router.Group("/v2") + { + v2.POST("/login", loginEndpoint) + v2.POST("/submit", submitEndpoint) + v2.POST("/read", readEndpoint) + } + + router.Run(":8080") +} +``` + +### Blank Gin without middleware by default + +Use + +```go +r := gin.New() +``` + +instead of + +```go +// Default With the Logger and Recovery middleware already attached +r := gin.Default() +``` + + +### Using middleware +```go +func main() { + // Creates a router without any middleware by default + r := gin.New() + + // Global middleware + // Logger middleware will write the logs to gin.DefaultWriter even if you set with GIN_MODE=release. + // By default gin.DefaultWriter = os.Stdout + r.Use(gin.Logger()) + + // Recovery middleware recovers from any panics and writes a 500 if there was one. + r.Use(gin.Recovery()) + + // Per route middleware, you can add as many as you desire. + r.GET("/benchmark", MyBenchLogger(), benchEndpoint) + + // Authorization group + // authorized := r.Group("/", AuthRequired()) + // exactly the same as: + authorized := r.Group("/") + // per group middleware! in this case we use the custom created + // AuthRequired() middleware just in the "authorized" group. + authorized.Use(AuthRequired()) + { + authorized.POST("/login", loginEndpoint) + authorized.POST("/submit", submitEndpoint) + authorized.POST("/read", readEndpoint) + + // nested group + testing := authorized.Group("testing") + testing.GET("/analytics", analyticsEndpoint) + } + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### How to write log file +```go +func main() { + // Disable Console Color, you don't need console color when writing the logs to file. + gin.DisableConsoleColor() + + // Logging to a file. + f, _ := os.Create("gin.log") + gin.DefaultWriter = io.MultiWriter(f) + + // Use the following code if you need to write the logs to file and console at the same time. + // gin.DefaultWriter = io.MultiWriter(f, os.Stdout) + + router := gin.Default() + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + +    router.Run(":8080") +} +``` + +### Model binding and validation + +To bind a request body into a type, use model binding. We currently support binding of JSON, XML and standard form values (foo=bar&boo=baz). + +Gin uses [**go-playground/validator.v8**](https://github.com/go-playground/validator) for validation. Check the full docs on tags usage [here](http://godoc.org/gopkg.in/go-playground/validator.v8#hdr-Baked_In_Validators_and_Tags). + +Note that you need to set the corresponding binding tag on all fields you want to bind. For example, when binding from JSON, set `json:"fieldname"`. + +Also, Gin provides two sets of methods for binding: +- **Type** - Must bind + - **Methods** - `Bind`, `BindJSON`, `BindQuery` + - **Behavior** - These methods use `MustBindWith` under the hood. 
If there is a binding error, the request is aborted with `c.AbortWithError(400, err).SetType(ErrorTypeBind)`. This sets the response status code to 400 and the `Content-Type` header is set to `text/plain; charset=utf-8`. Note that if you try to set the response code after this, it will result in a warning `[GIN-debug] [WARNING] Headers were already written. Wanted to override status code 400 with 422`. If you wish to have greater control over the behavior, consider using the `ShouldBind` equivalent method. +- **Type** - Should bind + - **Methods** - `ShouldBind`, `ShouldBindJSON`, `ShouldBindQuery` + - **Behavior** - These methods use `ShouldBindWith` under the hood. If there is a binding error, the error is returned and it is the developer's responsibility to handle the request and error appropriately. + +When using the Bind-method, Gin tries to infer the binder depending on the Content-Type header. If you are sure what you are binding, you can use `MustBindWith` or `ShouldBindWith`. + +You can also specify that specific fields are required. If a field is decorated with `binding:"required"` and has a empty value when binding, an error will be returned. + +```go +// Binding from JSON +type Login struct { + User string `form:"user" json:"user" binding:"required"` + Password string `form:"password" json:"password" binding:"required"` +} + +func main() { + router := gin.Default() + + // Example for binding JSON ({"user": "manu", "password": "123"}) + router.POST("/loginJSON", func(c *gin.Context) { + var json Login + if err := c.ShouldBindJSON(&json); err == nil { + if json.User == "manu" && json.Password == "123" { + c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) + } else { + c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) + } + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } + }) + + // Example for binding a HTML form (user=manu&password=123) + router.POST("/loginForm", func(c *gin.Context) { + var form Login + // This will infer what binder to use depending on the content-type header. + if err := c.ShouldBind(&form); err == nil { + if form.User == "manu" && form.Password == "123" { + c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) + } else { + c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) + } + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } + }) + + // Listen and serve on 0.0.0.0:8080 + router.Run(":8080") +} +``` + +**Sample request** +```shell +$ curl -v -X POST \ + http://localhost:8080/loginJSON \ + -H 'content-type: application/json' \ + -d '{ "user": "manu" }' +> POST /loginJSON HTTP/1.1 +> Host: localhost:8080 +> User-Agent: curl/7.51.0 +> Accept: */* +> content-type: application/json +> Content-Length: 18 +> +* upload completely sent off: 18 out of 18 bytes +< HTTP/1.1 400 Bad Request +< Content-Type: application/json; charset=utf-8 +< Date: Fri, 04 Aug 2017 03:51:31 GMT +< Content-Length: 100 +< +{"error":"Key: 'Login.Password' Error:Field validation for 'Password' failed on the 'required' tag"} +``` + +### Custom Validators + +It is also possible to register custom validators. See the [example code](examples/custom-validation/server.go). 
+ +[embedmd]:# (examples/custom-validation/server.go go) +```go +package main + +import ( + "net/http" + "reflect" + "time" + + "github.com/gin-gonic/gin" + "github.com/gin-gonic/gin/binding" + "gopkg.in/go-playground/validator.v8" +) + +type Booking struct { + CheckIn time.Time `form:"check_in" binding:"required,bookabledate" time_format:"2006-01-02"` + CheckOut time.Time `form:"check_out" binding:"required,gtfield=CheckIn" time_format:"2006-01-02"` +} + +func bookableDate( + v *validator.Validate, topStruct reflect.Value, currentStructOrField reflect.Value, + field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string, +) bool { + if date, ok := field.Interface().(time.Time); ok { + today := time.Now() + if today.Year() > date.Year() || today.YearDay() > date.YearDay() { + return false + } + } + return true +} + +func main() { + route := gin.Default() + binding.Validator.RegisterValidation("bookabledate", bookableDate) + route.GET("/bookable", getBookable) + route.Run(":8085") +} + +func getBookable(c *gin.Context) { + var b Booking + if err := c.ShouldBindWith(&b, binding.Query); err == nil { + c.JSON(http.StatusOK, gin.H{"message": "Booking dates are valid!"}) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} +``` + +```console +$ curl "localhost:8085/bookable?check_in=2017-08-16&check_out=2017-08-17" +{"message":"Booking dates are valid!"} + +$ curl "localhost:8085/bookable?check_in=2017-08-15&check_out=2017-08-16" +{"error":"Key: 'Booking.CheckIn' Error:Field validation for 'CheckIn' failed on the 'bookabledate' tag"} +``` + +### Only Bind Query String + +`ShouldBindQuery` function only binds the query params and not the post data. See the [detail information](https://github.com/gin-gonic/gin/issues/742#issuecomment-315953017). + +```go +package main + +import ( + "log" + + "github.com/gin-gonic/gin" +) + +type Person struct { + Name string `form:"name"` + Address string `form:"address"` +} + +func main() { + route := gin.Default() + route.Any("/testing", startPage) + route.Run(":8085") +} + +func startPage(c *gin.Context) { + var person Person + if c.ShouldBindQuery(&person) == nil { + log.Println("====== Only Bind By Query String ======") + log.Println(person.Name) + log.Println(person.Address) + } + c.String(200, "Success") +} + +``` + +### Bind Query String or Post Data + +See the [detail information](https://github.com/gin-gonic/gin/issues/742#issuecomment-264681292). + +```go +package main + +import "log" +import "github.com/gin-gonic/gin" +import "time" + +type Person struct { + Name string `form:"name"` + Address string `form:"address"` + Birthday time.Time `form:"birthday" time_format:"2006-01-02" time_utc:"1"` +} + +func main() { + route := gin.Default() + route.GET("/testing", startPage) + route.Run(":8085") +} + +func startPage(c *gin.Context) { + var person Person + // If `GET`, only `Form` binding engine (`query`) used. + // If `POST`, first checks the `content-type` for `JSON` or `XML`, then uses `Form` (`form-data`). 
+ // See more at https://github.com/gin-gonic/gin/blob/master/binding/binding.go#L48 + if c.ShouldBind(&person) == nil { + log.Println(person.Name) + log.Println(person.Address) + log.Println(person.Birthday) + } + + c.String(200, "Success") +} +``` + +Test it with: +```sh +$ curl -X GET "localhost:8085/testing?name=appleboy&address=xyz&birthday=1992-03-15" +``` + +### Bind HTML checkboxes + +See the [detail information](https://github.com/gin-gonic/gin/issues/129#issuecomment-124260092) + +main.go + +```go +... + +type myForm struct { + Colors []string `form:"colors[]"` +} + +... + +func formHandler(c *gin.Context) { + var fakeForm myForm + c.ShouldBind(&fakeForm) + c.JSON(200, gin.H{"color": fakeForm.Colors}) +} + +... + +``` + +form.html + +```html +
+<form action="/" method="POST">
+    <p>Check some colors</p>
+    <label for="red">Red</label>
+    <input type="checkbox" name="colors[]" value="red" id="red" />
+    <label for="green">Green</label>
+    <input type="checkbox" name="colors[]" value="green" id="green" />
+    <label for="blue">Blue</label>
+    <input type="checkbox" name="colors[]" value="blue" id="blue" />
+    <input type="submit" />
+</form>
+``` + +result: + +``` +{"color":["red","green","blue"]} +``` + +### Multipart/Urlencoded binding + +```go +package main + +import ( + "github.com/gin-gonic/gin" +) + +type LoginForm struct { + User string `form:"user" binding:"required"` + Password string `form:"password" binding:"required"` +} + +func main() { + router := gin.Default() + router.POST("/login", func(c *gin.Context) { + // you can bind multipart form with explicit binding declaration: + // c.ShouldBindWith(&form, binding.Form) + // or you can simply use autobinding with ShouldBind method: + var form LoginForm + // in this case proper binding will be automatically selected + if c.ShouldBind(&form) == nil { + if form.User == "user" && form.Password == "password" { + c.JSON(200, gin.H{"status": "you are logged in"}) + } else { + c.JSON(401, gin.H{"status": "unauthorized"}) + } + } + }) + router.Run(":8080") +} +``` + +Test it with: +```sh +$ curl -v --form user=user --form password=password http://localhost:8080/login +``` + +### XML, JSON and YAML rendering + +```go +func main() { + r := gin.Default() + + // gin.H is a shortcut for map[string]interface{} + r.GET("/someJSON", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK}) + }) + + r.GET("/moreJSON", func(c *gin.Context) { + // You also can use a struct + var msg struct { + Name string `json:"user"` + Message string + Number int + } + msg.Name = "Lena" + msg.Message = "hey" + msg.Number = 123 + // Note that msg.Name becomes "user" in the JSON + // Will output : {"user": "Lena", "Message": "hey", "Number": 123} + c.JSON(http.StatusOK, msg) + }) + + r.GET("/someXML", func(c *gin.Context) { + c.XML(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK}) + }) + + r.GET("/someYAML", func(c *gin.Context) { + c.YAML(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK}) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +#### SecureJSON + +Using SecureJSON to prevent json hijacking. Default prepends `"while(1),"` to response body if the given struct is array values. + +```go +func main() { + r := gin.Default() + + // You can also use your own secure json prefix + // r.SecureJsonPrefix(")]}',\n") + + r.GET("/someJSON", func(c *gin.Context) { + names := []string{"lena", "austin", "foo"} + + // Will output : while(1);["lena","austin","foo"] + c.SecureJSON(http.StatusOK, names) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Serving static files + +```go +func main() { + router := gin.Default() + router.Static("/assets", "./assets") + router.StaticFS("/more_static", http.Dir("my_file_system")) + router.StaticFile("/favicon.ico", "./resources/favicon.ico") + + // Listen and serve on 0.0.0.0:8080 + router.Run(":8080") +} +``` + +### HTML rendering + +Using LoadHTMLGlob() or LoadHTMLFiles() + +```go +func main() { + router := gin.Default() + router.LoadHTMLGlob("templates/*") + //router.LoadHTMLFiles("templates/template1.html", "templates/template2.html") + router.GET("/index", func(c *gin.Context) { + c.HTML(http.StatusOK, "index.tmpl", gin.H{ + "title": "Main website", + }) + }) + router.Run(":8080") +} +``` + +templates/index.tmpl + +```html + +

+<html>
+	<h1>
+		{{ .title }}
+	</h1>
+</html>
+ +``` + +Using templates with same name in different directories + +```go +func main() { + router := gin.Default() + router.LoadHTMLGlob("templates/**/*") + router.GET("/posts/index", func(c *gin.Context) { + c.HTML(http.StatusOK, "posts/index.tmpl", gin.H{ + "title": "Posts", + }) + }) + router.GET("/users/index", func(c *gin.Context) { + c.HTML(http.StatusOK, "users/index.tmpl", gin.H{ + "title": "Users", + }) + }) + router.Run(":8080") +} +``` + +templates/posts/index.tmpl + +```html +{{ define "posts/index.tmpl" }} +

+<html><h1>
+	{{ .title }}
+</h1>
+<p>Using posts/index.tmpl</p>
+</html>
+ +{{ end }} +``` + +templates/users/index.tmpl + +```html +{{ define "users/index.tmpl" }} +

+<html><h1>
+	{{ .title }}
+</h1>
+<p>Using users/index.tmpl</p>
+</html>
+ +{{ end }} +``` + +#### Custom Template renderer + +You can also use your own html template render + +```go +import "html/template" + +func main() { + router := gin.Default() + html := template.Must(template.ParseFiles("file1", "file2")) + router.SetHTMLTemplate(html) + router.Run(":8080") +} +``` + +#### Custom Delimiters + +You may use custom delims + +```go + r := gin.Default() + r.Delims("{[{", "}]}") + r.LoadHTMLGlob("/path/to/templates")) +``` + +#### Custom Template Funcs + +See the detail [example code](examples/template). + +main.go + +```go +import ( + "fmt" + "html/template" + "net/http" + "time" + + "github.com/gin-gonic/gin" +) + +func formatAsDate(t time.Time) string { + year, month, day := t.Date() + return fmt.Sprintf("%d%02d/%02d", year, month, day) +} + +func main() { + router := gin.Default() + router.Delims("{[{", "}]}") + router.SetFuncMap(template.FuncMap{ + "formatAsDate": formatAsDate, + }) + router.LoadHTMLFiles("./fixtures/basic/raw.tmpl") + + router.GET("/raw", func(c *gin.Context) { + c.HTML(http.StatusOK, "raw.tmpl", map[string]interface{}{ + "now": time.Date(2017, 07, 01, 0, 0, 0, 0, time.UTC), + }) + }) + + router.Run(":8080") +} + +``` + +raw.tmpl + +```html +Date: {[{.now | formatAsDate}]} +``` + +Result: +``` +Date: 2017/07/01 +``` + +### Multitemplate + +Gin allow by default use only one html.Template. Check [a multitemplate render](https://github.com/gin-contrib/multitemplate) for using features like go 1.6 `block template`. + +### Redirects + +Issuing a HTTP redirect is easy: + +```go +r.GET("/test", func(c *gin.Context) { + c.Redirect(http.StatusMovedPermanently, "http://www.google.com/") +}) +``` +Both internal and external locations are supported. + + +### Custom Middleware + +```go +func Logger() gin.HandlerFunc { + return func(c *gin.Context) { + t := time.Now() + + // Set example variable + c.Set("example", "12345") + + // before request + + c.Next() + + // after request + latency := time.Since(t) + log.Print(latency) + + // access the status we are sending + status := c.Writer.Status() + log.Println(status) + } +} + +func main() { + r := gin.New() + r.Use(Logger()) + + r.GET("/test", func(c *gin.Context) { + example := c.MustGet("example").(string) + + // it would print: "12345" + log.Println(example) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Using BasicAuth() middleware + +```go +// simulate some private data +var secrets = gin.H{ + "foo": gin.H{"email": "foo@bar.com", "phone": "123433"}, + "austin": gin.H{"email": "austin@example.com", "phone": "666"}, + "lena": gin.H{"email": "lena@guapa.com", "phone": "523443"}, +} + +func main() { + r := gin.Default() + + // Group using gin.BasicAuth() middleware + // gin.Accounts is a shortcut for map[string]string + authorized := r.Group("/admin", gin.BasicAuth(gin.Accounts{ + "foo": "bar", + "austin": "1234", + "lena": "hello2", + "manu": "4321", + })) + + // /admin/secrets endpoint + // hit "localhost:8080/admin/secrets + authorized.GET("/secrets", func(c *gin.Context) { + // get user, it was set by the BasicAuth middleware + user := c.MustGet(gin.AuthUserKey).(string) + if secret, ok := secrets[user]; ok { + c.JSON(http.StatusOK, gin.H{"user": user, "secret": secret}) + } else { + c.JSON(http.StatusOK, gin.H{"user": user, "secret": "NO SECRET :("}) + } + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Goroutines inside a middleware + +When starting new Goroutines inside a middleware or handler, you **SHOULD NOT** use the original context 
inside it, you have to use a read-only copy. + +```go +func main() { + r := gin.Default() + + r.GET("/long_async", func(c *gin.Context) { + // create copy to be used inside the goroutine + cCp := c.Copy() + go func() { + // simulate a long task with time.Sleep(). 5 seconds + time.Sleep(5 * time.Second) + + // note that you are using the copied context "cCp", IMPORTANT + log.Println("Done! in path " + cCp.Request.URL.Path) + }() + }) + + r.GET("/long_sync", func(c *gin.Context) { + // simulate a long task with time.Sleep(). 5 seconds + time.Sleep(5 * time.Second) + + // since we are NOT using a goroutine, we do not have to copy the context + log.Println("Done! in path " + c.Request.URL.Path) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Custom HTTP configuration + +Use `http.ListenAndServe()` directly, like this: + +```go +func main() { + router := gin.Default() + http.ListenAndServe(":8080", router) +} +``` +or + +```go +func main() { + router := gin.Default() + + s := &http.Server{ + Addr: ":8080", + Handler: router, + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + MaxHeaderBytes: 1 << 20, + } + s.ListenAndServe() +} +``` + +### Support Let's Encrypt + +example for 1-line LetsEncrypt HTTPS servers. + +[embedmd]:# (examples/auto-tls/example1/main.go go) +```go +package main + +import ( + "log" + + "github.com/gin-gonic/autotls" + "github.com/gin-gonic/gin" +) + +func main() { + r := gin.Default() + + // Ping handler + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + log.Fatal(autotls.Run(r, "example1.com", "example2.com")) +} +``` + +example for custom autocert manager. + +[embedmd]:# (examples/auto-tls/example2/main.go go) +```go +package main + +import ( + "log" + + "github.com/gin-gonic/autotls" + "github.com/gin-gonic/gin" + "golang.org/x/crypto/acme/autocert" +) + +func main() { + r := gin.Default() + + // Ping handler + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + m := autocert.Manager{ + Prompt: autocert.AcceptTOS, + HostPolicy: autocert.HostWhitelist("example1.com", "example2.com"), + Cache: autocert.DirCache("/var/www/.cache"), + } + + log.Fatal(autotls.RunWithManager(r, &m)) +} +``` + +### Run multiple service using Gin + +See the [question](https://github.com/gin-gonic/gin/issues/346) and try the following example: + +[embedmd]:# (examples/multiple-service/main.go go) +```go +package main + +import ( + "log" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "golang.org/x/sync/errgroup" +) + +var ( + g errgroup.Group +) + +func router01() http.Handler { + e := gin.New() + e.Use(gin.Recovery()) + e.GET("/", func(c *gin.Context) { + c.JSON( + http.StatusOK, + gin.H{ + "code": http.StatusOK, + "error": "Welcome server 01", + }, + ) + }) + + return e +} + +func router02() http.Handler { + e := gin.New() + e.Use(gin.Recovery()) + e.GET("/", func(c *gin.Context) { + c.JSON( + http.StatusOK, + gin.H{ + "code": http.StatusOK, + "error": "Welcome server 02", + }, + ) + }) + + return e +} + +func main() { + server01 := &http.Server{ + Addr: ":8080", + Handler: router01(), + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + } + + server02 := &http.Server{ + Addr: ":8081", + Handler: router02(), + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + } + + g.Go(func() error { + return server01.ListenAndServe() + }) + + g.Go(func() error { + return server02.ListenAndServe() + }) + + if err := g.Wait(); err != nil { + log.Fatal(err) + } +} +``` + +### Graceful 
restart or stop + +Do you want to graceful restart or stop your web server? +There are some ways this can be done. + +We can use [fvbock/endless](https://github.com/fvbock/endless) to replace the default `ListenAndServe`. Refer issue [#296](https://github.com/gin-gonic/gin/issues/296) for more details. + +```go +router := gin.Default() +router.GET("/", handler) +// [...] +endless.ListenAndServe(":4242", router) +``` + +An alternative to endless: + +* [manners](https://github.com/braintree/manners): A polite Go HTTP server that shuts down gracefully. +* [graceful](https://github.com/tylerb/graceful): Graceful is a Go package enabling graceful shutdown of an http.Handler server. +* [grace](https://github.com/facebookgo/grace): Graceful restart & zero downtime deploy for Go servers. + +If you are using Go 1.8, you may not need to use this library! Consider using http.Server's built-in [Shutdown()](https://golang.org/pkg/net/http/#Server.Shutdown) method for graceful shutdowns. See the full [graceful-shutdown](./examples/graceful-shutdown) example with gin. + +[embedmd]:# (examples/graceful-shutdown/graceful-shutdown/server.go go) +```go +// +build go1.8 + +package main + +import ( + "context" + "log" + "net/http" + "os" + "os/signal" + "time" + + "github.com/gin-gonic/gin" +) + +func main() { + router := gin.Default() + router.GET("/", func(c *gin.Context) { + time.Sleep(5 * time.Second) + c.String(http.StatusOK, "Welcome Gin Server") + }) + + srv := &http.Server{ + Addr: ":8080", + Handler: router, + } + + go func() { + // service connections + if err := srv.ListenAndServe(); err != nil { + log.Printf("listen: %s\n", err) + } + }() + + // Wait for interrupt signal to gracefully shutdown the server with + // a timeout of 5 seconds. + quit := make(chan os.Signal) + signal.Notify(quit, os.Interrupt) + <-quit + log.Println("Shutdown Server ...") + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := srv.Shutdown(ctx); err != nil { + log.Fatal("Server Shutdown:", err) + } + log.Println("Server exiting") +} +``` + +## Testing + +The `net/http/httptest` package is preferable way for HTTP testing. + +```go +package main + +func setupRouter() *gin.Engine { + r := gin.Default() + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + return r +} + +func main() { + r := setupRouter() + r.Run(":8080") +} +``` + +Test for code example above: + +```go +package main + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPingRoute(t *testing.T) { + router := setupRouter() + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/ping", nil) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "pong", w.Body.String()) +} +``` + +## Users [![Sourcegraph](https://sourcegraph.com/github.com/gin-gonic/gin/-/badge.svg)](https://sourcegraph.com/github.com/gin-gonic/gin?badge) + +Awesome project lists using [Gin](https://github.com/gin-gonic/gin) web framework. + +* [drone](https://github.com/drone/drone): Drone is a Continuous Delivery platform built on Docker, written in Go +* [gorush](https://github.com/appleboy/gorush): A push notification server written in Go. diff --git a/vendor/github.com/gin-gonic/gin/auth.go b/vendor/github.com/gin-gonic/gin/auth.go new file mode 100644 index 000000000..c2143091e --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/auth.go @@ -0,0 +1,95 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. 
+// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "crypto/subtle" + "encoding/base64" + "strconv" +) + +// AuthUserKey is the cookie name for user credential in basic auth. +const AuthUserKey = "user" + +// Accounts defines a key/value for user/pass list of authorized logins. +type Accounts map[string]string + +type authPair struct { + value string + user string +} + +type authPairs []authPair + +func (a authPairs) searchCredential(authValue string) (string, bool) { + if authValue == "" { + return "", false + } + for _, pair := range a { + if pair.value == authValue { + return pair.user, true + } + } + return "", false +} + +// BasicAuthForRealm returns a Basic HTTP Authorization middleware. It takes as arguments a map[string]string where +// the key is the user name and the value is the password, as well as the name of the Realm. +// If the realm is empty, "Authorization Required" will be used by default. +// (see http://tools.ietf.org/html/rfc2617#section-1.2) +func BasicAuthForRealm(accounts Accounts, realm string) HandlerFunc { + if realm == "" { + realm = "Authorization Required" + } + realm = "Basic realm=" + strconv.Quote(realm) + pairs := processAccounts(accounts) + return func(c *Context) { + // Search user in the slice of allowed credentials + user, found := pairs.searchCredential(c.requestHeader("Authorization")) + if !found { + // Credentials doesn't match, we return 401 and abort handlers chain. + c.Header("WWW-Authenticate", realm) + c.AbortWithStatus(401) + return + } + + // The user credentials was found, set user's id to key AuthUserKey in this context, the user's id can be read later using + // c.MustGet(gin.AuthUserKey). + c.Set(AuthUserKey, user) + } +} + +// BasicAuth returns a Basic HTTP Authorization middleware. It takes as argument a map[string]string where +// the key is the user name and the value is the password. +func BasicAuth(accounts Accounts) HandlerFunc { + return BasicAuthForRealm(accounts, "") +} + +func processAccounts(accounts Accounts) authPairs { + assert1(len(accounts) > 0, "Empty list of authorized credentials") + pairs := make(authPairs, 0, len(accounts)) + for user, password := range accounts { + assert1(user != "", "User can not be empty") + value := authorizationHeader(user, password) + pairs = append(pairs, authPair{ + value: value, + user: user, + }) + } + return pairs +} + +func authorizationHeader(user, password string) string { + base := user + ":" + password + return "Basic " + base64.StdEncoding.EncodeToString([]byte(base)) +} + +func secureCompare(given, actual string) bool { + if subtle.ConstantTimeEq(int32(len(given)), int32(len(actual))) == 1 { + return subtle.ConstantTimeCompare([]byte(given), []byte(actual)) == 1 + } + // Securely compare actual to itself to keep constant time, but always return false. + return subtle.ConstantTimeCompare([]byte(actual), []byte(actual)) == 1 && false +} diff --git a/vendor/github.com/gin-gonic/gin/auth_test.go b/vendor/github.com/gin-gonic/gin/auth_test.go new file mode 100644 index 000000000..dc8523b0b --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/auth_test.go @@ -0,0 +1,146 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package gin + +import ( + "encoding/base64" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBasicAuth(t *testing.T) { + pairs := processAccounts(Accounts{ + "admin": "password", + "foo": "bar", + "bar": "foo", + }) + + assert.Len(t, pairs, 3) + assert.Contains(t, pairs, authPair{ + user: "bar", + value: "Basic YmFyOmZvbw==", + }) + assert.Contains(t, pairs, authPair{ + user: "foo", + value: "Basic Zm9vOmJhcg==", + }) + assert.Contains(t, pairs, authPair{ + user: "admin", + value: "Basic YWRtaW46cGFzc3dvcmQ=", + }) +} + +func TestBasicAuthFails(t *testing.T) { + assert.Panics(t, func() { processAccounts(nil) }) + assert.Panics(t, func() { + processAccounts(Accounts{ + "": "password", + "foo": "bar", + }) + }) +} + +func TestBasicAuthSearchCredential(t *testing.T) { + pairs := processAccounts(Accounts{ + "admin": "password", + "foo": "bar", + "bar": "foo", + }) + + user, found := pairs.searchCredential(authorizationHeader("admin", "password")) + assert.Equal(t, "admin", user) + assert.True(t, found) + + user, found = pairs.searchCredential(authorizationHeader("foo", "bar")) + assert.Equal(t, "foo", user) + assert.True(t, found) + + user, found = pairs.searchCredential(authorizationHeader("bar", "foo")) + assert.Equal(t, "bar", user) + assert.True(t, found) + + user, found = pairs.searchCredential(authorizationHeader("admins", "password")) + assert.Empty(t, user) + assert.False(t, found) + + user, found = pairs.searchCredential(authorizationHeader("foo", "bar ")) + assert.Empty(t, user) + assert.False(t, found) + + user, found = pairs.searchCredential("") + assert.Empty(t, user) + assert.False(t, found) +} + +func TestBasicAuthAuthorizationHeader(t *testing.T) { + assert.Equal(t, "Basic YWRtaW46cGFzc3dvcmQ=", authorizationHeader("admin", "password")) +} + +func TestBasicAuthSecureCompare(t *testing.T) { + assert.True(t, secureCompare("1234567890", "1234567890")) + assert.False(t, secureCompare("123456789", "1234567890")) + assert.False(t, secureCompare("12345678900", "1234567890")) + assert.False(t, secureCompare("1234567891", "1234567890")) +} + +func TestBasicAuthSucceed(t *testing.T) { + accounts := Accounts{"admin": "password"} + router := New() + router.Use(BasicAuth(accounts)) + router.GET("/login", func(c *Context) { + c.String(200, c.MustGet(AuthUserKey).(string)) + }) + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/login", nil) + req.Header.Set("Authorization", authorizationHeader("admin", "password")) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "admin", w.Body.String()) +} + +func TestBasicAuth401(t *testing.T) { + called := false + accounts := Accounts{"foo": "bar"} + router := New() + router.Use(BasicAuth(accounts)) + router.GET("/login", func(c *Context) { + called = true + c.String(200, c.MustGet(AuthUserKey).(string)) + }) + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/login", nil) + req.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte("admin:password"))) + router.ServeHTTP(w, req) + + assert.False(t, called) + assert.Equal(t, 401, w.Code) + assert.Equal(t, "Basic realm=\"Authorization Required\"", w.HeaderMap.Get("WWW-Authenticate")) +} + +func TestBasicAuth401WithCustomRealm(t *testing.T) { + called := false + accounts := Accounts{"foo": "bar"} + router := New() + router.Use(BasicAuthForRealm(accounts, "My Custom \"Realm\"")) + router.GET("/login", func(c *Context) { + called = true + c.String(200, 
c.MustGet(AuthUserKey).(string)) + }) + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/login", nil) + req.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte("admin:password"))) + router.ServeHTTP(w, req) + + assert.False(t, called) + assert.Equal(t, 401, w.Code) + assert.Equal(t, "Basic realm=\"My Custom \\\"Realm\\\"\"", w.HeaderMap.Get("WWW-Authenticate")) +} diff --git a/vendor/github.com/gin-gonic/gin/benchmarks_test.go b/vendor/github.com/gin-gonic/gin/benchmarks_test.go new file mode 100644 index 000000000..e79700343 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/benchmarks_test.go @@ -0,0 +1,160 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "html/template" + "net/http" + "os" + "testing" +) + +func BenchmarkOneRoute(B *testing.B) { + router := New() + router.GET("/ping", func(c *Context) {}) + runRequest(B, router, "GET", "/ping") +} + +func BenchmarkRecoveryMiddleware(B *testing.B) { + router := New() + router.Use(Recovery()) + router.GET("/", func(c *Context) {}) + runRequest(B, router, "GET", "/") +} + +func BenchmarkLoggerMiddleware(B *testing.B) { + router := New() + router.Use(LoggerWithWriter(newMockWriter())) + router.GET("/", func(c *Context) {}) + runRequest(B, router, "GET", "/") +} + +func BenchmarkManyHandlers(B *testing.B) { + router := New() + router.Use(Recovery(), LoggerWithWriter(newMockWriter())) + router.Use(func(c *Context) {}) + router.Use(func(c *Context) {}) + router.GET("/ping", func(c *Context) {}) + runRequest(B, router, "GET", "/ping") +} + +func Benchmark5Params(B *testing.B) { + DefaultWriter = os.Stdout + router := New() + router.Use(func(c *Context) {}) + router.GET("/param/:param1/:params2/:param3/:param4/:param5", func(c *Context) {}) + runRequest(B, router, "GET", "/param/path/to/parameter/john/12345") +} + +func BenchmarkOneRouteJSON(B *testing.B) { + router := New() + data := struct { + Status string `json:"status"` + }{"ok"} + router.GET("/json", func(c *Context) { + c.JSON(200, data) + }) + runRequest(B, router, "GET", "/json") +} + +func BenchmarkOneRouteHTML(B *testing.B) { + router := New() + t := template.Must(template.New("index").Parse(` +

+		<html><body><h1>{{.}}</h1></body></html>

`)) + router.SetHTMLTemplate(t) + + router.GET("/html", func(c *Context) { + c.HTML(200, "index", "hola") + }) + runRequest(B, router, "GET", "/html") +} + +func BenchmarkOneRouteSet(B *testing.B) { + router := New() + router.GET("/ping", func(c *Context) { + c.Set("key", "value") + }) + runRequest(B, router, "GET", "/ping") +} + +func BenchmarkOneRouteString(B *testing.B) { + router := New() + router.GET("/text", func(c *Context) { + c.String(200, "this is a plain text") + }) + runRequest(B, router, "GET", "/text") +} + +func BenchmarkManyRoutesFist(B *testing.B) { + router := New() + router.Any("/ping", func(c *Context) {}) + runRequest(B, router, "GET", "/ping") +} + +func BenchmarkManyRoutesLast(B *testing.B) { + router := New() + router.Any("/ping", func(c *Context) {}) + runRequest(B, router, "OPTIONS", "/ping") +} + +func Benchmark404(B *testing.B) { + router := New() + router.Any("/something", func(c *Context) {}) + router.NoRoute(func(c *Context) {}) + runRequest(B, router, "GET", "/ping") +} + +func Benchmark404Many(B *testing.B) { + router := New() + router.GET("/", func(c *Context) {}) + router.GET("/path/to/something", func(c *Context) {}) + router.GET("/post/:id", func(c *Context) {}) + router.GET("/view/:id", func(c *Context) {}) + router.GET("/favicon.ico", func(c *Context) {}) + router.GET("/robots.txt", func(c *Context) {}) + router.GET("/delete/:id", func(c *Context) {}) + router.GET("/user/:id/:mode", func(c *Context) {}) + + router.NoRoute(func(c *Context) {}) + runRequest(B, router, "GET", "/viewfake") +} + +type mockWriter struct { + headers http.Header +} + +func newMockWriter() *mockWriter { + return &mockWriter{ + http.Header{}, + } +} + +func (m *mockWriter) Header() (h http.Header) { + return m.headers +} + +func (m *mockWriter) Write(p []byte) (n int, err error) { + return len(p), nil +} + +func (m *mockWriter) WriteString(s string) (n int, err error) { + return len(s), nil +} + +func (m *mockWriter) WriteHeader(int) {} + +func runRequest(B *testing.B, r *Engine, method, path string) { + // create fake request + req, err := http.NewRequest(method, path, nil) + if err != nil { + panic(err) + } + w := newMockWriter() + B.ReportAllocs() + B.ResetTimer() + for i := 0; i < B.N; i++ { + r.ServeHTTP(w, req) + } +} diff --git a/vendor/github.com/gin-gonic/gin/binding/binding.go b/vendor/github.com/gin-gonic/gin/binding/binding.go new file mode 100644 index 000000000..dc32d5387 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/binding.go @@ -0,0 +1,82 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "net/http" + + "gopkg.in/go-playground/validator.v8" +) + +const ( + MIMEJSON = "application/json" + MIMEHTML = "text/html" + MIMEXML = "application/xml" + MIMEXML2 = "text/xml" + MIMEPlain = "text/plain" + MIMEPOSTForm = "application/x-www-form-urlencoded" + MIMEMultipartPOSTForm = "multipart/form-data" + MIMEPROTOBUF = "application/x-protobuf" + MIMEMSGPACK = "application/x-msgpack" + MIMEMSGPACK2 = "application/msgpack" +) + +type Binding interface { + Name() string + Bind(*http.Request, interface{}) error +} + +type StructValidator interface { + // ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right. + // If the received type is not a struct, any validation should be skipped and nil must be returned. 
+ // If the received type is a struct or pointer to a struct, the validation should be performed. + // If the struct is not valid or the validation itself fails, a descriptive error should be returned. + // Otherwise nil must be returned. + ValidateStruct(interface{}) error + + // RegisterValidation adds a validation Func to a Validate's map of validators denoted by the key + // NOTE: if the key already exists, the previous validation function will be replaced. + // NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation + RegisterValidation(string, validator.Func) error +} + +var Validator StructValidator = &defaultValidator{} + +var ( + JSON = jsonBinding{} + XML = xmlBinding{} + Form = formBinding{} + Query = queryBinding{} + FormPost = formPostBinding{} + FormMultipart = formMultipartBinding{} + ProtoBuf = protobufBinding{} + MsgPack = msgpackBinding{} +) + +func Default(method, contentType string) Binding { + if method == "GET" { + return Form + } + + switch contentType { + case MIMEJSON: + return JSON + case MIMEXML, MIMEXML2: + return XML + case MIMEPROTOBUF: + return ProtoBuf + case MIMEMSGPACK, MIMEMSGPACK2: + return MsgPack + default: //case MIMEPOSTForm, MIMEMultipartPOSTForm: + return Form + } +} + +func validate(obj interface{}) error { + if Validator == nil { + return nil + } + return Validator.ValidateStruct(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/binding/binding_test.go b/vendor/github.com/gin-gonic/gin/binding/binding_test.go new file mode 100644 index 000000000..0c0f9f810 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/binding_test.go @@ -0,0 +1,1054 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import ( + "bytes" + "encoding/json" + "errors" + "io/ioutil" + "mime/multipart" + "net/http" + "testing" + "time" + + "github.com/gin-gonic/gin/binding/example" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/ugorji/go/codec" +) + +type FooStruct struct { + Foo string `msgpack:"foo" json:"foo" form:"foo" xml:"foo" binding:"required"` +} + +type FooBarStruct struct { + FooStruct + Bar string `msgpack:"bar" json:"bar" form:"bar" xml:"bar" binding:"required"` +} + +type FooStructUseNumber struct { + Foo interface{} `json:"foo" binding:"required"` +} + +type FooBarStructForTimeType struct { + TimeFoo time.Time `form:"time_foo" time_format:"2006-01-02" time_utc:"1" time_location:"Asia/Chongqing"` + TimeBar time.Time `form:"time_bar" time_format:"2006-01-02" time_utc:"1"` +} + +type FooStructForTimeTypeNotFormat struct { + TimeFoo time.Time `form:"time_foo"` +} + +type FooStructForTimeTypeFailFormat struct { + TimeFoo time.Time `form:"time_foo" time_format:"2017-11-15"` +} + +type FooStructForTimeTypeFailLocation struct { + TimeFoo time.Time `form:"time_foo" time_format:"2006-01-02" time_location:"/asia/chongqing"` +} + +type FooStructForMapType struct { + // Unknown type: not support map + MapFoo map[string]interface{} `form:"map_foo"` +} + +type InvalidNameType struct { + TestName string `invalid_name:"test_name"` +} + +type InvalidNameMapType struct { + TestName struct { + MapFoo map[string]interface{} `form:"map_foo"` + } +} + +type FooStructForSliceType struct { + SliceFoo []int `form:"slice_foo"` +} + +type FooStructForSliceMapType struct { + // Unknown type: not support map + SliceMapFoo []map[string]interface{} `form:"slice_map_foo"` +} + +type FooBarStructForIntType struct { + IntFoo int `form:"int_foo"` + IntBar int `form:"int_bar" binding:"required"` +} + +type FooBarStructForInt8Type struct { + Int8Foo int8 `form:"int8_foo"` + Int8Bar int8 `form:"int8_bar" binding:"required"` +} + +type FooBarStructForInt16Type struct { + Int16Foo int16 `form:"int16_foo"` + Int16Bar int16 `form:"int16_bar" binding:"required"` +} + +type FooBarStructForInt32Type struct { + Int32Foo int32 `form:"int32_foo"` + Int32Bar int32 `form:"int32_bar" binding:"required"` +} + +type FooBarStructForInt64Type struct { + Int64Foo int64 `form:"int64_foo"` + Int64Bar int64 `form:"int64_bar" binding:"required"` +} + +type FooBarStructForUintType struct { + UintFoo uint `form:"uint_foo"` + UintBar uint `form:"uint_bar" binding:"required"` +} + +type FooBarStructForUint8Type struct { + Uint8Foo uint8 `form:"uint8_foo"` + Uint8Bar uint8 `form:"uint8_bar" binding:"required"` +} + +type FooBarStructForUint16Type struct { + Uint16Foo uint16 `form:"uint16_foo"` + Uint16Bar uint16 `form:"uint16_bar" binding:"required"` +} + +type FooBarStructForUint32Type struct { + Uint32Foo uint32 `form:"uint32_foo"` + Uint32Bar uint32 `form:"uint32_bar" binding:"required"` +} + +type FooBarStructForUint64Type struct { + Uint64Foo uint64 `form:"uint64_foo"` + Uint64Bar uint64 `form:"uint64_bar" binding:"required"` +} + +type FooBarStructForBoolType struct { + BoolFoo bool `form:"bool_foo"` + BoolBar bool `form:"bool_bar" binding:"required"` +} + +type FooBarStructForFloat32Type struct { + Float32Foo float32 `form:"float32_foo"` + Float32Bar float32 `form:"float32_bar" binding:"required"` +} + +type FooBarStructForFloat64Type struct { + Float64Foo float64 `form:"float64_foo"` + Float64Bar float64 `form:"float64_bar" binding:"required"` +} + +func TestBindingDefault(t *testing.T) 
{ + assert.Equal(t, Default("GET", ""), Form) + assert.Equal(t, Default("GET", MIMEJSON), Form) + + assert.Equal(t, Default("POST", MIMEJSON), JSON) + assert.Equal(t, Default("PUT", MIMEJSON), JSON) + + assert.Equal(t, Default("POST", MIMEXML), XML) + assert.Equal(t, Default("PUT", MIMEXML2), XML) + + assert.Equal(t, Default("POST", MIMEPOSTForm), Form) + assert.Equal(t, Default("PUT", MIMEPOSTForm), Form) + + assert.Equal(t, Default("POST", MIMEMultipartPOSTForm), Form) + assert.Equal(t, Default("PUT", MIMEMultipartPOSTForm), Form) + + assert.Equal(t, Default("POST", MIMEPROTOBUF), ProtoBuf) + assert.Equal(t, Default("PUT", MIMEPROTOBUF), ProtoBuf) + + assert.Equal(t, Default("POST", MIMEMSGPACK), MsgPack) + assert.Equal(t, Default("PUT", MIMEMSGPACK2), MsgPack) +} + +func TestBindingJSON(t *testing.T) { + testBodyBinding(t, + JSON, "json", + "/", "/", + `{"foo": "bar"}`, `{"bar": "foo"}`) +} + +func TestBindingJSONUseNumber(t *testing.T) { + testBodyBindingUseNumber(t, + JSON, "json", + "/", "/", + `{"foo": 123}`, `{"bar": "foo"}`) +} + +func TestBindingJSONUseNumber2(t *testing.T) { + testBodyBindingUseNumber2(t, + JSON, "json", + "/", "/", + `{"foo": 123}`, `{"bar": "foo"}`) +} + +func TestBindingForm(t *testing.T) { + testFormBinding(t, "POST", + "/", "/", + "foo=bar&bar=foo", "bar2=foo") +} + +func TestBindingForm2(t *testing.T) { + testFormBinding(t, "GET", + "/?foo=bar&bar=foo", "/?bar2=foo", + "", "") +} + +func TestBindingFormForTime(t *testing.T) { + testFormBindingForTime(t, "POST", + "/", "/", + "time_foo=2017-11-15&time_bar=", "bar2=foo") + testFormBindingForTimeNotFormat(t, "POST", + "/", "/", + "time_foo=2017-11-15", "bar2=foo") + testFormBindingForTimeFailFormat(t, "POST", + "/", "/", + "time_foo=2017-11-15", "bar2=foo") + testFormBindingForTimeFailLocation(t, "POST", + "/", "/", + "time_foo=2017-11-15", "bar2=foo") +} + +func TestBindingFormForTime2(t *testing.T) { + testFormBindingForTime(t, "GET", + "/?time_foo=2017-11-15&time_bar=", "/?bar2=foo", + "", "") + testFormBindingForTimeNotFormat(t, "GET", + "/?time_foo=2017-11-15", "/?bar2=foo", + "", "") + testFormBindingForTimeFailFormat(t, "GET", + "/?time_foo=2017-11-15", "/?bar2=foo", + "", "") + testFormBindingForTimeFailLocation(t, "GET", + "/?time_foo=2017-11-15", "/?bar2=foo", + "", "") +} + +func TestBindingFormInvalidName(t *testing.T) { + testFormBindingInvalidName(t, "POST", + "/", "/", + "test_name=bar", "bar2=foo") +} + +func TestBindingFormInvalidName2(t *testing.T) { + testFormBindingInvalidName2(t, "POST", + "/", "/", + "map_foo=bar", "bar2=foo") +} + +func TestBindingFormForType(t *testing.T) { + testFormBindingForType(t, "POST", + "/", "/", + "map_foo=", "bar2=1", "Map") + + testFormBindingForType(t, "POST", + "/", "/", + "slice_foo=1&slice_foo=2", "bar2=1&bar2=2", "Slice") + + testFormBindingForType(t, "GET", + "/?slice_foo=1&slice_foo=2", "/?bar2=1&bar2=2", + "", "", "Slice") + + testFormBindingForType(t, "POST", + "/", "/", + "slice_map_foo=1&slice_map_foo=2", "bar2=1&bar2=2", "SliceMap") + + testFormBindingForType(t, "GET", + "/?slice_map_foo=1&slice_map_foo=2", "/?bar2=1&bar2=2", + "", "", "SliceMap") + + testFormBindingForType(t, "POST", + "/", "/", + "int_foo=&int_bar=-12", "bar2=-123", "Int") + + testFormBindingForType(t, "GET", + "/?int_foo=&int_bar=-12", "/?bar2=-123", + "", "", "Int") + + testFormBindingForType(t, "POST", + "/", "/", + "int8_foo=&int8_bar=-12", "bar2=-123", "Int8") + + testFormBindingForType(t, "GET", + "/?int8_foo=&int8_bar=-12", "/?bar2=-123", + "", "", "Int8") + + 
testFormBindingForType(t, "POST", + "/", "/", + "int16_foo=&int16_bar=-12", "bar2=-123", "Int16") + + testFormBindingForType(t, "GET", + "/?int16_foo=&int16_bar=-12", "/?bar2=-123", + "", "", "Int16") + + testFormBindingForType(t, "POST", + "/", "/", + "int32_foo=&int32_bar=-12", "bar2=-123", "Int32") + + testFormBindingForType(t, "GET", + "/?int32_foo=&int32_bar=-12", "/?bar2=-123", + "", "", "Int32") + + testFormBindingForType(t, "POST", + "/", "/", + "int64_foo=&int64_bar=-12", "bar2=-123", "Int64") + + testFormBindingForType(t, "GET", + "/?int64_foo=&int64_bar=-12", "/?bar2=-123", + "", "", "Int64") + + testFormBindingForType(t, "POST", + "/", "/", + "uint_foo=&uint_bar=12", "bar2=123", "Uint") + + testFormBindingForType(t, "GET", + "/?uint_foo=&uint_bar=12", "/?bar2=123", + "", "", "Uint") + + testFormBindingForType(t, "POST", + "/", "/", + "uint8_foo=&uint8_bar=12", "bar2=123", "Uint8") + + testFormBindingForType(t, "GET", + "/?uint8_foo=&uint8_bar=12", "/?bar2=123", + "", "", "Uint8") + + testFormBindingForType(t, "POST", + "/", "/", + "uint16_foo=&uint16_bar=12", "bar2=123", "Uint16") + + testFormBindingForType(t, "GET", + "/?uint16_foo=&uint16_bar=12", "/?bar2=123", + "", "", "Uint16") + + testFormBindingForType(t, "POST", + "/", "/", + "uint32_foo=&uint32_bar=12", "bar2=123", "Uint32") + + testFormBindingForType(t, "GET", + "/?uint32_foo=&uint32_bar=12", "/?bar2=123", + "", "", "Uint32") + + testFormBindingForType(t, "POST", + "/", "/", + "uint64_foo=&uint64_bar=12", "bar2=123", "Uint64") + + testFormBindingForType(t, "GET", + "/?uint64_foo=&uint64_bar=12", "/?bar2=123", + "", "", "Uint64") + + testFormBindingForType(t, "POST", + "/", "/", + "bool_foo=&bool_bar=true", "bar2=true", "Bool") + + testFormBindingForType(t, "GET", + "/?bool_foo=&bool_bar=true", "/?bar2=true", + "", "", "Bool") + + testFormBindingForType(t, "POST", + "/", "/", + "float32_foo=&float32_bar=-12.34", "bar2=12.3", "Float32") + + testFormBindingForType(t, "GET", + "/?float32_foo=&float32_bar=-12.34", "/?bar2=12.3", + "", "", "Float32") + + testFormBindingForType(t, "POST", + "/", "/", + "float64_foo=&float64_bar=-12.34", "bar2=12.3", "Float64") + + testFormBindingForType(t, "GET", + "/?float64_foo=&float64_bar=-12.34", "/?bar2=12.3", + "", "", "Float64") +} + +func TestBindingQuery(t *testing.T) { + testQueryBinding(t, "POST", + "/?foo=bar&bar=foo", "/", + "foo=unused", "bar2=foo") +} + +func TestBindingQuery2(t *testing.T) { + testQueryBinding(t, "GET", + "/?foo=bar&bar=foo", "/?bar2=foo", + "foo=unused", "") +} + +func TestBindingQueryFail(t *testing.T) { + testQueryBindingFail(t, "POST", + "/?map_foo=", "/", + "map_foo=unused", "bar2=foo") +} + +func TestBindingQueryFail2(t *testing.T) { + testQueryBindingFail(t, "GET", + "/?map_foo=", "/?bar2=foo", + "map_foo=unused", "") +} + +func TestBindingXML(t *testing.T) { + testBodyBinding(t, + XML, "xml", + "/", "/", + "bar", "foo") +} + +func TestBindingXMLFail(t *testing.T) { + testBodyBindingFail(t, + XML, "xml", + "/", "/", + "bar", "foo") +} + +func createFormPostRequest() *http.Request { + req, _ := http.NewRequest("POST", "/?foo=getfoo&bar=getbar", bytes.NewBufferString("foo=bar&bar=foo")) + req.Header.Set("Content-Type", MIMEPOSTForm) + return req +} + +func createFormPostRequestFail() *http.Request { + req, _ := http.NewRequest("POST", "/?map_foo=getfoo", bytes.NewBufferString("map_foo=bar")) + req.Header.Set("Content-Type", MIMEPOSTForm) + return req +} + +func createFormMultipartRequest() *http.Request { + boundary := "--testboundary" + body := 
new(bytes.Buffer) + mw := multipart.NewWriter(body) + defer mw.Close() + + mw.SetBoundary(boundary) + mw.WriteField("foo", "bar") + mw.WriteField("bar", "foo") + req, _ := http.NewRequest("POST", "/?foo=getfoo&bar=getbar", body) + req.Header.Set("Content-Type", MIMEMultipartPOSTForm+"; boundary="+boundary) + return req +} + +func createFormMultipartRequestFail() *http.Request { + boundary := "--testboundary" + body := new(bytes.Buffer) + mw := multipart.NewWriter(body) + defer mw.Close() + + mw.SetBoundary(boundary) + mw.WriteField("map_foo", "bar") + req, _ := http.NewRequest("POST", "/?map_foo=getfoo", body) + req.Header.Set("Content-Type", MIMEMultipartPOSTForm+"; boundary="+boundary) + return req +} + +func TestBindingFormPost(t *testing.T) { + req := createFormPostRequest() + var obj FooBarStruct + FormPost.Bind(req, &obj) + + assert.Equal(t, FormPost.Name(), "form-urlencoded") + assert.Equal(t, obj.Foo, "bar") + assert.Equal(t, obj.Bar, "foo") +} + +func TestBindingFormPostFail(t *testing.T) { + req := createFormPostRequestFail() + var obj FooStructForMapType + err := FormPost.Bind(req, &obj) + assert.Error(t, err) +} + +func TestBindingFormMultipart(t *testing.T) { + req := createFormMultipartRequest() + var obj FooBarStruct + FormMultipart.Bind(req, &obj) + + assert.Equal(t, FormMultipart.Name(), "multipart/form-data") + assert.Equal(t, obj.Foo, "bar") + assert.Equal(t, obj.Bar, "foo") +} + +func TestBindingFormMultipartFail(t *testing.T) { + req := createFormMultipartRequestFail() + var obj FooStructForMapType + err := FormMultipart.Bind(req, &obj) + assert.Error(t, err) +} + +func TestBindingProtoBuf(t *testing.T) { + test := &example.Test{ + Label: proto.String("yes"), + } + data, _ := proto.Marshal(test) + + testProtoBodyBinding(t, + ProtoBuf, "protobuf", + "/", "/", + string(data), string(data[1:])) +} + +func TestBindingProtoBufFail(t *testing.T) { + test := &example.Test{ + Label: proto.String("yes"), + } + data, _ := proto.Marshal(test) + + testProtoBodyBindingFail(t, + ProtoBuf, "protobuf", + "/", "/", + string(data), string(data[1:])) +} + +func TestBindingMsgPack(t *testing.T) { + test := FooStruct{ + Foo: "bar", + } + + h := new(codec.MsgpackHandle) + assert.NotNil(t, h) + buf := bytes.NewBuffer([]byte{}) + assert.NotNil(t, buf) + err := codec.NewEncoder(buf, h).Encode(test) + assert.NoError(t, err) + + data := buf.Bytes() + + testMsgPackBodyBinding(t, + MsgPack, "msgpack", + "/", "/", + string(data), string(data[1:])) +} + +func TestValidationFails(t *testing.T) { + var obj FooStruct + req := requestWithBody("POST", "/", `{"bar": "foo"}`) + err := JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func TestValidationDisabled(t *testing.T) { + backup := Validator + Validator = nil + defer func() { Validator = backup }() + + var obj FooStruct + req := requestWithBody("POST", "/", `{"bar": "foo"}`) + err := JSON.Bind(req, &obj) + assert.NoError(t, err) +} + +func TestExistsSucceeds(t *testing.T) { + type HogeStruct struct { + Hoge *int `json:"hoge" binding:"exists"` + } + + var obj HogeStruct + req := requestWithBody("POST", "/", `{"hoge": 0}`) + err := JSON.Bind(req, &obj) + assert.NoError(t, err) +} + +func TestExistsFails(t *testing.T) { + type HogeStruct struct { + Hoge *int `json:"foo" binding:"exists"` + } + + var obj HogeStruct + req := requestWithBody("POST", "/", `{"boen": 0}`) + err := JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func testFormBinding(t *testing.T, method, path, badPath, body, badBody string) { + b := Form + assert.Equal(t, b.Name(), "form") 
+ + obj := FooBarStruct{} + req := requestWithBody(method, path, body) + if method == "POST" { + req.Header.Add("Content-Type", MIMEPOSTForm) + } + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Foo, "bar") + assert.Equal(t, obj.Bar, "foo") + + obj = FooBarStruct{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func TestFormBindingFail(t *testing.T) { + b := Form + assert.Equal(t, b.Name(), "form") + + obj := FooBarStruct{} + req, _ := http.NewRequest("POST", "/", nil) + err := b.Bind(req, &obj) + assert.Error(t, err) +} + +func TestFormPostBindingFail(t *testing.T) { + b := FormPost + assert.Equal(t, b.Name(), "form-urlencoded") + + obj := FooBarStruct{} + req, _ := http.NewRequest("POST", "/", nil) + err := b.Bind(req, &obj) + assert.Error(t, err) +} + +func TestFormMultipartBindingFail(t *testing.T) { + b := FormMultipart + assert.Equal(t, b.Name(), "multipart/form-data") + + obj := FooBarStruct{} + req, _ := http.NewRequest("POST", "/", nil) + err := b.Bind(req, &obj) + assert.Error(t, err) +} + +func testFormBindingForTime(t *testing.T, method, path, badPath, body, badBody string) { + b := Form + assert.Equal(t, b.Name(), "form") + + obj := FooBarStructForTimeType{} + req := requestWithBody(method, path, body) + if method == "POST" { + req.Header.Add("Content-Type", MIMEPOSTForm) + } + err := b.Bind(req, &obj) + + assert.NoError(t, err) + assert.Equal(t, obj.TimeFoo.Unix(), int64(1510675200)) + assert.Equal(t, obj.TimeFoo.Location().String(), "Asia/Chongqing") + assert.Equal(t, obj.TimeBar.Unix(), int64(-62135596800)) + assert.Equal(t, obj.TimeBar.Location().String(), "UTC") + + obj = FooBarStructForTimeType{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func testFormBindingForTimeNotFormat(t *testing.T, method, path, badPath, body, badBody string) { + b := Form + assert.Equal(t, b.Name(), "form") + + obj := FooStructForTimeTypeNotFormat{} + req := requestWithBody(method, path, body) + if method == "POST" { + req.Header.Add("Content-Type", MIMEPOSTForm) + } + err := b.Bind(req, &obj) + assert.Error(t, err) + + obj = FooStructForTimeTypeNotFormat{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func testFormBindingForTimeFailFormat(t *testing.T, method, path, badPath, body, badBody string) { + b := Form + assert.Equal(t, b.Name(), "form") + + obj := FooStructForTimeTypeFailFormat{} + req := requestWithBody(method, path, body) + if method == "POST" { + req.Header.Add("Content-Type", MIMEPOSTForm) + } + err := b.Bind(req, &obj) + assert.Error(t, err) + + obj = FooStructForTimeTypeFailFormat{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func testFormBindingForTimeFailLocation(t *testing.T, method, path, badPath, body, badBody string) { + b := Form + assert.Equal(t, b.Name(), "form") + + obj := FooStructForTimeTypeFailLocation{} + req := requestWithBody(method, path, body) + if method == "POST" { + req.Header.Add("Content-Type", MIMEPOSTForm) + } + err := b.Bind(req, &obj) + assert.Error(t, err) + + obj = FooStructForTimeTypeFailLocation{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func testFormBindingInvalidName(t *testing.T, method, path, badPath, body, badBody string) { + b := Form + assert.Equal(t, b.Name(), "form") + + obj := InvalidNameType{} + req := 
requestWithBody(method, path, body) + if method == "POST" { + req.Header.Add("Content-Type", MIMEPOSTForm) + } + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.TestName, "") + + obj = InvalidNameType{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func testFormBindingInvalidName2(t *testing.T, method, path, badPath, body, badBody string) { + b := Form + assert.Equal(t, b.Name(), "form") + + obj := InvalidNameMapType{} + req := requestWithBody(method, path, body) + if method == "POST" { + req.Header.Add("Content-Type", MIMEPOSTForm) + } + err := b.Bind(req, &obj) + assert.Error(t, err) + + obj = InvalidNameMapType{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func testFormBindingForType(t *testing.T, method, path, badPath, body, badBody string, typ string) { + b := Form + assert.Equal(t, b.Name(), "form") + + req := requestWithBody(method, path, body) + if method == "POST" { + req.Header.Add("Content-Type", MIMEPOSTForm) + } + switch typ { + case "Int": + obj := FooBarStructForIntType{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.IntFoo, int(0)) + assert.Equal(t, obj.IntBar, int(-12)) + + obj = FooBarStructForIntType{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Int8": + obj := FooBarStructForInt8Type{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Int8Foo, int8(0)) + assert.Equal(t, obj.Int8Bar, int8(-12)) + + obj = FooBarStructForInt8Type{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Int16": + obj := FooBarStructForInt16Type{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Int16Foo, int16(0)) + assert.Equal(t, obj.Int16Bar, int16(-12)) + + obj = FooBarStructForInt16Type{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Int32": + obj := FooBarStructForInt32Type{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Int32Foo, int32(0)) + assert.Equal(t, obj.Int32Bar, int32(-12)) + + obj = FooBarStructForInt32Type{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Int64": + obj := FooBarStructForInt64Type{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Int64Foo, int64(0)) + assert.Equal(t, obj.Int64Bar, int64(-12)) + + obj = FooBarStructForInt64Type{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Uint": + obj := FooBarStructForUintType{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.UintFoo, uint(0x0)) + assert.Equal(t, obj.UintBar, uint(0xc)) + + obj = FooBarStructForUintType{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Uint8": + obj := FooBarStructForUint8Type{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Uint8Foo, uint8(0x0)) + assert.Equal(t, obj.Uint8Bar, uint8(0xc)) + + obj = FooBarStructForUint8Type{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Uint16": + obj := FooBarStructForUint16Type{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Uint16Foo, uint16(0x0)) + assert.Equal(t, 
obj.Uint16Bar, uint16(0xc)) + + obj = FooBarStructForUint16Type{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Uint32": + obj := FooBarStructForUint32Type{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Uint32Foo, uint32(0x0)) + assert.Equal(t, obj.Uint32Bar, uint32(0xc)) + + obj = FooBarStructForUint32Type{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Uint64": + obj := FooBarStructForUint64Type{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Uint64Foo, uint64(0x0)) + assert.Equal(t, obj.Uint64Bar, uint64(0xc)) + + obj = FooBarStructForUint64Type{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Float32": + obj := FooBarStructForFloat32Type{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Float32Foo, float32(0.0)) + assert.Equal(t, obj.Float32Bar, float32(-12.34)) + + obj = FooBarStructForFloat32Type{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Float64": + obj := FooBarStructForFloat64Type{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Float64Foo, float64(0.0)) + assert.Equal(t, obj.Float64Bar, float64(-12.34)) + + obj = FooBarStructForFloat64Type{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Bool": + obj := FooBarStructForBoolType{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.BoolFoo, false) + assert.Equal(t, obj.BoolBar, true) + + obj = FooBarStructForBoolType{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Slice": + obj := FooStructForSliceType{} + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.SliceFoo, []int{1, 2}) + + obj = FooStructForSliceType{} + req = requestWithBody(method, badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) + case "Map": + obj := FooStructForMapType{} + err := b.Bind(req, &obj) + assert.Error(t, err) + case "SliceMap": + obj := FooStructForSliceMapType{} + err := b.Bind(req, &obj) + assert.Error(t, err) + } +} + +func testQueryBinding(t *testing.T, method, path, badPath, body, badBody string) { + b := Query + assert.Equal(t, b.Name(), "query") + + obj := FooBarStruct{} + req := requestWithBody(method, path, body) + if method == "POST" { + req.Header.Add("Content-Type", MIMEPOSTForm) + } + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Foo, "bar") + assert.Equal(t, obj.Bar, "foo") +} + +func testQueryBindingFail(t *testing.T, method, path, badPath, body, badBody string) { + b := Query + assert.Equal(t, b.Name(), "query") + + obj := FooStructForMapType{} + req := requestWithBody(method, path, body) + if method == "POST" { + req.Header.Add("Content-Type", MIMEPOSTForm) + } + err := b.Bind(req, &obj) + assert.Error(t, err) +} + +func testBodyBinding(t *testing.T, b Binding, name, path, badPath, body, badBody string) { + assert.Equal(t, b.Name(), name) + + obj := FooStruct{} + req := requestWithBody("POST", path, body) + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Foo, "bar") + + obj = FooStruct{} + req = requestWithBody("POST", badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func testBodyBindingUseNumber(t *testing.T, b 
Binding, name, path, badPath, body, badBody string) { + assert.Equal(t, b.Name(), name) + + obj := FooStructUseNumber{} + req := requestWithBody("POST", path, body) + EnableDecoderUseNumber = true + err := b.Bind(req, &obj) + assert.NoError(t, err) + // we hope it is int64(123) + v, e := obj.Foo.(json.Number).Int64() + assert.NoError(t, e) + assert.Equal(t, v, int64(123)) + + obj = FooStructUseNumber{} + req = requestWithBody("POST", badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func testBodyBindingUseNumber2(t *testing.T, b Binding, name, path, badPath, body, badBody string) { + assert.Equal(t, b.Name(), name) + + obj := FooStructUseNumber{} + req := requestWithBody("POST", path, body) + EnableDecoderUseNumber = false + err := b.Bind(req, &obj) + assert.NoError(t, err) + // it will return float64(123) if not use EnableDecoderUseNumber + // maybe it is not hoped + assert.Equal(t, obj.Foo, float64(123)) + + obj = FooStructUseNumber{} + req = requestWithBody("POST", badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func testBodyBindingFail(t *testing.T, b Binding, name, path, badPath, body, badBody string) { + assert.Equal(t, b.Name(), name) + + obj := FooStruct{} + req := requestWithBody("POST", path, body) + err := b.Bind(req, &obj) + assert.Error(t, err) + assert.Equal(t, obj.Foo, "") + + obj = FooStruct{} + req = requestWithBody("POST", badPath, badBody) + err = JSON.Bind(req, &obj) + assert.Error(t, err) +} + +func testProtoBodyBinding(t *testing.T, b Binding, name, path, badPath, body, badBody string) { + assert.Equal(t, b.Name(), name) + + obj := example.Test{} + req := requestWithBody("POST", path, body) + req.Header.Add("Content-Type", MIMEPROTOBUF) + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, *obj.Label, "yes") + + obj = example.Test{} + req = requestWithBody("POST", badPath, badBody) + req.Header.Add("Content-Type", MIMEPROTOBUF) + err = ProtoBuf.Bind(req, &obj) + assert.Error(t, err) +} + +type hook struct{} + +func (h hook) Read([]byte) (int, error) { + return 0, errors.New("error") +} + +func testProtoBodyBindingFail(t *testing.T, b Binding, name, path, badPath, body, badBody string) { + assert.Equal(t, b.Name(), name) + + obj := example.Test{} + req := requestWithBody("POST", path, body) + + req.Body = ioutil.NopCloser(&hook{}) + req.Header.Add("Content-Type", MIMEPROTOBUF) + err := b.Bind(req, &obj) + assert.Error(t, err) + + obj = example.Test{} + req = requestWithBody("POST", badPath, badBody) + req.Header.Add("Content-Type", MIMEPROTOBUF) + err = ProtoBuf.Bind(req, &obj) + assert.Error(t, err) +} + +func testMsgPackBodyBinding(t *testing.T, b Binding, name, path, badPath, body, badBody string) { + assert.Equal(t, b.Name(), name) + + obj := FooStruct{} + req := requestWithBody("POST", path, body) + req.Header.Add("Content-Type", MIMEMSGPACK) + err := b.Bind(req, &obj) + assert.NoError(t, err) + assert.Equal(t, obj.Foo, "bar") + + obj = FooStruct{} + req = requestWithBody("POST", badPath, badBody) + req.Header.Add("Content-Type", MIMEMSGPACK) + err = MsgPack.Bind(req, &obj) + assert.Error(t, err) +} + +func requestWithBody(method, path, body string) (req *http.Request) { + req, _ = http.NewRequest(method, path, bytes.NewBufferString(body)) + return +} diff --git a/vendor/github.com/gin-gonic/gin/binding/default_validator.go b/vendor/github.com/gin-gonic/gin/binding/default_validator.go new file mode 100644 index 000000000..6336bb6e2 --- /dev/null +++ 
b/vendor/github.com/gin-gonic/gin/binding/default_validator.go @@ -0,0 +1,50 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "reflect" + "sync" + + "gopkg.in/go-playground/validator.v8" +) + +type defaultValidator struct { + once sync.Once + validate *validator.Validate +} + +var _ StructValidator = &defaultValidator{} + +func (v *defaultValidator) ValidateStruct(obj interface{}) error { + if kindOfData(obj) == reflect.Struct { + v.lazyinit() + if err := v.validate.Struct(obj); err != nil { + return error(err) + } + } + return nil +} + +func (v *defaultValidator) RegisterValidation(key string, fn validator.Func) error { + v.lazyinit() + return v.validate.RegisterValidation(key, fn) +} + +func (v *defaultValidator) lazyinit() { + v.once.Do(func() { + config := &validator.Config{TagName: "binding"} + v.validate = validator.New(config) + }) +} + +func kindOfData(data interface{}) reflect.Kind { + value := reflect.ValueOf(data) + valueType := value.Kind() + if valueType == reflect.Ptr { + valueType = value.Elem().Kind() + } + return valueType +} diff --git a/vendor/github.com/gin-gonic/gin/binding/example/test.pb.go b/vendor/github.com/gin-gonic/gin/binding/example/test.pb.go new file mode 100644 index 000000000..3de8444ff --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/example/test.pb.go @@ -0,0 +1,113 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package example is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + Test +*/ +package example + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = math.Inf + +type FOO int32 + +const ( + FOO_X FOO = 17 +) + +var FOO_name = map[int32]string{ + 17: "X", +} +var FOO_value = map[string]int32{ + "X": 17, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} + +type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Test) Reset() { *m = Test{} } +func (m *Test) String() string { return proto.CompactTextString(m) } +func (*Test) ProtoMessage() {} + +const Default_Test_Type int32 = 77 + +func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type +} + +func (m *Test) GetReps() []int64 { + if m != nil { + return m.Reps + } + return nil +} + +func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } +func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*Test_OptionalGroup) ProtoMessage() {} + +func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) +} diff --git a/vendor/github.com/gin-gonic/gin/binding/example/test.proto b/vendor/github.com/gin-gonic/gin/binding/example/test.proto new file mode 100644 index 000000000..8ee9800aa --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/example/test.proto @@ -0,0 +1,12 @@ +package example; + +enum FOO {X=17;}; + +message Test { + required string label = 1; + optional int32 type = 2[default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4{ + required string RequiredField = 5; + } +} diff --git a/vendor/github.com/gin-gonic/gin/binding/form.go b/vendor/github.com/gin-gonic/gin/binding/form.go new file mode 100644 index 000000000..0be59660b --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/form.go @@ -0,0 +1,56 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import "net/http" + +const defaultMemory = 32 * 1024 * 1024 + +type formBinding struct{} +type formPostBinding struct{} +type formMultipartBinding struct{} + +func (formBinding) Name() string { + return "form" +} + +func (formBinding) Bind(req *http.Request, obj interface{}) error { + if err := req.ParseForm(); err != nil { + return err + } + req.ParseMultipartForm(defaultMemory) + if err := mapForm(obj, req.Form); err != nil { + return err + } + return validate(obj) +} + +func (formPostBinding) Name() string { + return "form-urlencoded" +} + +func (formPostBinding) Bind(req *http.Request, obj interface{}) error { + if err := req.ParseForm(); err != nil { + return err + } + if err := mapForm(obj, req.PostForm); err != nil { + return err + } + return validate(obj) +} + +func (formMultipartBinding) Name() string { + return "multipart/form-data" +} + +func (formMultipartBinding) Bind(req *http.Request, obj interface{}) error { + if err := req.ParseMultipartForm(defaultMemory); err != nil { + return err + } + if err := mapForm(obj, req.MultipartForm.Value); err != nil { + return err + } + return validate(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/binding/form_mapping.go b/vendor/github.com/gin-gonic/gin/binding/form_mapping.go new file mode 100644 index 000000000..dd8c62468 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/form_mapping.go @@ -0,0 +1,181 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "errors" + "reflect" + "strconv" + "time" +) + +func mapForm(ptr interface{}, form map[string][]string) error { + typ := reflect.TypeOf(ptr).Elem() + val := reflect.ValueOf(ptr).Elem() + for i := 0; i < typ.NumField(); i++ { + typeField := typ.Field(i) + structField := val.Field(i) + if !structField.CanSet() { + continue + } + + structFieldKind := structField.Kind() + inputFieldName := typeField.Tag.Get("form") + if inputFieldName == "" { + inputFieldName = typeField.Name + + // if "form" tag is nil, we inspect if the field is a struct. 
+ // this would not make sense for JSON parsing but it does for a form + // since data is flatten + if structFieldKind == reflect.Struct { + err := mapForm(structField.Addr().Interface(), form) + if err != nil { + return err + } + continue + } + } + inputValue, exists := form[inputFieldName] + if !exists { + continue + } + + numElems := len(inputValue) + if structFieldKind == reflect.Slice && numElems > 0 { + sliceOf := structField.Type().Elem().Kind() + slice := reflect.MakeSlice(structField.Type(), numElems, numElems) + for i := 0; i < numElems; i++ { + if err := setWithProperType(sliceOf, inputValue[i], slice.Index(i)); err != nil { + return err + } + } + val.Field(i).Set(slice) + } else { + if _, isTime := structField.Interface().(time.Time); isTime { + if err := setTimeField(inputValue[0], typeField, structField); err != nil { + return err + } + continue + } + if err := setWithProperType(typeField.Type.Kind(), inputValue[0], structField); err != nil { + return err + } + } + } + return nil +} + +func setWithProperType(valueKind reflect.Kind, val string, structField reflect.Value) error { + switch valueKind { + case reflect.Int: + return setIntField(val, 0, structField) + case reflect.Int8: + return setIntField(val, 8, structField) + case reflect.Int16: + return setIntField(val, 16, structField) + case reflect.Int32: + return setIntField(val, 32, structField) + case reflect.Int64: + return setIntField(val, 64, structField) + case reflect.Uint: + return setUintField(val, 0, structField) + case reflect.Uint8: + return setUintField(val, 8, structField) + case reflect.Uint16: + return setUintField(val, 16, structField) + case reflect.Uint32: + return setUintField(val, 32, structField) + case reflect.Uint64: + return setUintField(val, 64, structField) + case reflect.Bool: + return setBoolField(val, structField) + case reflect.Float32: + return setFloatField(val, 32, structField) + case reflect.Float64: + return setFloatField(val, 64, structField) + case reflect.String: + structField.SetString(val) + default: + return errors.New("Unknown type") + } + return nil +} + +func setIntField(val string, bitSize int, field reflect.Value) error { + if val == "" { + val = "0" + } + intVal, err := strconv.ParseInt(val, 10, bitSize) + if err == nil { + field.SetInt(intVal) + } + return err +} + +func setUintField(val string, bitSize int, field reflect.Value) error { + if val == "" { + val = "0" + } + uintVal, err := strconv.ParseUint(val, 10, bitSize) + if err == nil { + field.SetUint(uintVal) + } + return err +} + +func setBoolField(val string, field reflect.Value) error { + if val == "" { + val = "false" + } + boolVal, err := strconv.ParseBool(val) + if err == nil { + field.SetBool(boolVal) + } + return nil +} + +func setFloatField(val string, bitSize int, field reflect.Value) error { + if val == "" { + val = "0.0" + } + floatVal, err := strconv.ParseFloat(val, bitSize) + if err == nil { + field.SetFloat(floatVal) + } + return err +} + +func setTimeField(val string, structField reflect.StructField, value reflect.Value) error { + timeFormat := structField.Tag.Get("time_format") + if timeFormat == "" { + return errors.New("Blank time format") + } + + if val == "" { + value.Set(reflect.ValueOf(time.Time{})) + return nil + } + + l := time.Local + if isUTC, _ := strconv.ParseBool(structField.Tag.Get("time_utc")); isUTC { + l = time.UTC + } + + if locTag := structField.Tag.Get("time_location"); locTag != "" { + loc, err := time.LoadLocation(locTag) + if err != nil { + return err + } + l = loc + } + + t, err 
:= time.ParseInLocation(timeFormat, val, l) + if err != nil { + return err + } + + value.Set(reflect.ValueOf(t)) + return nil +} diff --git a/vendor/github.com/gin-gonic/gin/binding/json.go b/vendor/github.com/gin-gonic/gin/binding/json.go new file mode 100644 index 000000000..b7c856af9 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/json.go @@ -0,0 +1,30 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "net/http" + + "github.com/gin-gonic/gin/json" +) + +var EnableDecoderUseNumber = false + +type jsonBinding struct{} + +func (jsonBinding) Name() string { + return "json" +} + +func (jsonBinding) Bind(req *http.Request, obj interface{}) error { + decoder := json.NewDecoder(req.Body) + if EnableDecoderUseNumber { + decoder.UseNumber() + } + if err := decoder.Decode(obj); err != nil { + return err + } + return validate(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/binding/msgpack.go b/vendor/github.com/gin-gonic/gin/binding/msgpack.go new file mode 100644 index 000000000..7faea4b5c --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/msgpack.go @@ -0,0 +1,24 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "net/http" + + "github.com/ugorji/go/codec" +) + +type msgpackBinding struct{} + +func (msgpackBinding) Name() string { + return "msgpack" +} + +func (msgpackBinding) Bind(req *http.Request, obj interface{}) error { + if err := codec.NewDecoder(req.Body, new(codec.MsgpackHandle)).Decode(&obj); err != nil { + return err + } + return validate(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/binding/protobuf.go b/vendor/github.com/gin-gonic/gin/binding/protobuf.go new file mode 100644 index 000000000..c7eb84e9b --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/protobuf.go @@ -0,0 +1,35 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "io/ioutil" + "net/http" + + "github.com/golang/protobuf/proto" +) + +type protobufBinding struct{} + +func (protobufBinding) Name() string { + return "protobuf" +} + +func (protobufBinding) Bind(req *http.Request, obj interface{}) error { + + buf, err := ioutil.ReadAll(req.Body) + if err != nil { + return err + } + + if err = proto.Unmarshal(buf, obj.(proto.Message)); err != nil { + return err + } + + //Here it's same to return validate(obj), but util now we cann't add `binding:""` to the struct + //which automatically generate by gen-proto + return nil + //return validate(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/binding/query.go b/vendor/github.com/gin-gonic/gin/binding/query.go new file mode 100644 index 000000000..219743f2a --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/query.go @@ -0,0 +1,21 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import "net/http" + +type queryBinding struct{} + +func (queryBinding) Name() string { + return "query" +} + +func (queryBinding) Bind(req *http.Request, obj interface{}) error { + values := req.URL.Query() + if err := mapForm(obj, values); err != nil { + return err + } + return validate(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/binding/validate_test.go b/vendor/github.com/gin-gonic/gin/binding/validate_test.go new file mode 100644 index 000000000..8ca799896 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/validate_test.go @@ -0,0 +1,233 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "bytes" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "gopkg.in/go-playground/validator.v8" +) + +type testInterface interface { + String() string +} + +type substructNoValidation struct { + IString string + IInt int +} + +type mapNoValidationSub map[string]substructNoValidation + +type structNoValidationValues struct { + substructNoValidation + + Boolean bool + + Uinteger uint + Integer int + Integer8 int8 + Integer16 int16 + Integer32 int32 + Integer64 int64 + Uinteger8 uint8 + Uinteger16 uint16 + Uinteger32 uint32 + Uinteger64 uint64 + + Float32 float32 + Float64 float64 + + String string + + Date time.Time + + Struct substructNoValidation + InlinedStruct struct { + String []string + Integer int + } + + IntSlice []int + IntPointerSlice []*int + StructPointerSlice []*substructNoValidation + StructSlice []substructNoValidation + InterfaceSlice []testInterface + + UniversalInterface interface{} + CustomInterface testInterface + + FloatMap map[string]float32 + StructMap mapNoValidationSub +} + +func createNoValidationValues() structNoValidationValues { + integer := 1 + s := structNoValidationValues{ + Boolean: true, + Uinteger: 1 << 29, + Integer: -10000, + Integer8: 120, + Integer16: -20000, + Integer32: 1 << 29, + Integer64: 1 << 61, + Uinteger8: 250, + Uinteger16: 50000, + Uinteger32: 1 << 31, + Uinteger64: 1 << 62, + Float32: 123.456, + Float64: 123.456789, + String: "text", + Date: time.Time{}, + CustomInterface: &bytes.Buffer{}, + Struct: substructNoValidation{}, + IntSlice: []int{-3, -2, 1, 0, 1, 2, 3}, + IntPointerSlice: []*int{&integer}, + StructSlice: []substructNoValidation{}, + UniversalInterface: 1.2, + FloatMap: map[string]float32{ + "foo": 1.23, + "bar": 232.323, + }, + StructMap: mapNoValidationSub{ + "foo": substructNoValidation{}, + "bar": substructNoValidation{}, + }, + // StructPointerSlice []noValidationSub + // InterfaceSlice []testInterface + } + s.InlinedStruct.Integer = 1000 + s.InlinedStruct.String = []string{"first", "second"} + s.IString = "substring" + s.IInt = 987654 + return s +} + +func TestValidateNoValidationValues(t *testing.T) { + origin := createNoValidationValues() + test := createNoValidationValues() + empty := structNoValidationValues{} + + assert.Nil(t, validate(test)) + assert.Nil(t, validate(&test)) + assert.Nil(t, validate(empty)) + assert.Nil(t, validate(&empty)) + + assert.Equal(t, origin, test) +} + +type structNoValidationPointer struct { + substructNoValidation + + Boolean bool + + Uinteger *uint + Integer *int + Integer8 *int8 + Integer16 *int16 + Integer32 *int32 + Integer64 *int64 + Uinteger8 *uint8 + Uinteger16 *uint16 + Uinteger32 *uint32 + Uinteger64 *uint64 + + Float32 *float32 + Float64 *float64 + + String *string + + Date 
*time.Time + + Struct *substructNoValidation + + IntSlice *[]int + IntPointerSlice *[]*int + StructPointerSlice *[]*substructNoValidation + StructSlice *[]substructNoValidation + InterfaceSlice *[]testInterface + + FloatMap *map[string]float32 + StructMap *mapNoValidationSub +} + +func TestValidateNoValidationPointers(t *testing.T) { + //origin := createNoValidation_values() + //test := createNoValidation_values() + empty := structNoValidationPointer{} + + //assert.Nil(t, validate(test)) + //assert.Nil(t, validate(&test)) + assert.Nil(t, validate(empty)) + assert.Nil(t, validate(&empty)) + + //assert.Equal(t, origin, test) +} + +type Object map[string]interface{} + +func TestValidatePrimitives(t *testing.T) { + obj := Object{"foo": "bar", "bar": 1} + assert.NoError(t, validate(obj)) + assert.NoError(t, validate(&obj)) + assert.Equal(t, obj, Object{"foo": "bar", "bar": 1}) + + obj2 := []Object{{"foo": "bar", "bar": 1}, {"foo": "bar", "bar": 1}} + assert.NoError(t, validate(obj2)) + assert.NoError(t, validate(&obj2)) + + nu := 10 + assert.NoError(t, validate(nu)) + assert.NoError(t, validate(&nu)) + assert.Equal(t, nu, 10) + + str := "value" + assert.NoError(t, validate(str)) + assert.NoError(t, validate(&str)) + assert.Equal(t, str, "value") +} + +// structCustomValidation is a helper struct we use to check that +// custom validation can be registered on it. +// The `notone` binding directive is for custom validation and registered later. +type structCustomValidation struct { + Integer int `binding:"notone"` +} + +// notOne is a custom validator meant to be used with `validator.v8` library. +// The method signature for `v9` is significantly different and this function +// would need to be changed for tests to pass after upgrade. +// See https://github.com/gin-gonic/gin/pull/1015. +func notOne( + v *validator.Validate, topStruct reflect.Value, currentStructOrField reflect.Value, + field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string, +) bool { + if val, ok := field.Interface().(int); ok { + return val != 1 + } + return false +} + +func TestRegisterValidation(t *testing.T) { + // This validates that the function `notOne` matches + // the expected function signature by `defaultValidator` + // and by extension the validator library. + err := Validator.RegisterValidation("notone", notOne) + // Check that we can register custom validation without error + assert.Nil(t, err) + + // Create an instance which will fail validation + withOne := structCustomValidation{Integer: 1} + errs := validate(withOne) + + // Check that we got back non-nil errs + assert.NotNil(t, errs) + // Check that the error matches expactation + assert.Error(t, errs, "", "", "notone") +} diff --git a/vendor/github.com/gin-gonic/gin/binding/xml.go b/vendor/github.com/gin-gonic/gin/binding/xml.go new file mode 100644 index 000000000..f84a6b7f1 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/xml.go @@ -0,0 +1,24 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import ( + "encoding/xml" + "net/http" +) + +type xmlBinding struct{} + +func (xmlBinding) Name() string { + return "xml" +} + +func (xmlBinding) Bind(req *http.Request, obj interface{}) error { + decoder := xml.NewDecoder(req.Body) + if err := decoder.Decode(obj); err != nil { + return err + } + return validate(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/codecov.yml b/vendor/github.com/gin-gonic/gin/codecov.yml new file mode 100644 index 000000000..c9c9a522d --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/codecov.yml @@ -0,0 +1,5 @@ +coverage: + notify: + gitter: + default: + url: https://webhooks.gitter.im/e/d90dcdeeab2f1e357165 diff --git a/vendor/github.com/gin-gonic/gin/context.go b/vendor/github.com/gin-gonic/gin/context.go new file mode 100644 index 000000000..90d4c6e59 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/context.go @@ -0,0 +1,822 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "errors" + "io" + "io/ioutil" + "math" + "mime/multipart" + "net" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/gin-contrib/sse" + "github.com/gin-gonic/gin/binding" + "github.com/gin-gonic/gin/render" +) + +// Content-Type MIME of the most common data formats. +const ( + MIMEJSON = binding.MIMEJSON + MIMEHTML = binding.MIMEHTML + MIMEXML = binding.MIMEXML + MIMEXML2 = binding.MIMEXML2 + MIMEPlain = binding.MIMEPlain + MIMEPOSTForm = binding.MIMEPOSTForm + MIMEMultipartPOSTForm = binding.MIMEMultipartPOSTForm +) + +const abortIndex int8 = math.MaxInt8 / 2 + +// Context is the most important part of gin. It allows us to pass variables between middleware, +// manage the flow, validate the JSON of a request and render a JSON response for example. +type Context struct { + writermem responseWriter + Request *http.Request + Writer ResponseWriter + + Params Params + handlers HandlersChain + index int8 + + engine *Engine + + // Keys is a key/value pair exclusively for the context of each request. + Keys map[string]interface{} + + // Errors is a list of errors attached to all the handlers/middlewares who used this context. + Errors errorMsgs + + // Accepted defines a list of manually accepted formats for content negotiation. + Accepted []string +} + +/************************************/ +/********** CONTEXT CREATION ********/ +/************************************/ + +func (c *Context) reset() { + c.Writer = &c.writermem + c.Params = c.Params[0:0] + c.handlers = nil + c.index = -1 + c.Keys = nil + c.Errors = c.Errors[0:0] + c.Accepted = nil +} + +// Copy returns a copy of the current context that can be safely used outside the request's scope. +// This has to be used when the context has to be passed to a goroutine. +func (c *Context) Copy() *Context { + var cp = *c + cp.writermem.ResponseWriter = nil + cp.Writer = &cp.writermem + cp.index = abortIndex + cp.handlers = nil + return &cp +} + +// HandlerName returns the main handler's name. For example if the handler is "handleGetUsers()", +// this function will return "main.handleGetUsers". +func (c *Context) HandlerName() string { + return nameOfFunction(c.handlers.Last()) +} + +// Handler returns the main handler. 
+func (c *Context) Handler() HandlerFunc { + return c.handlers.Last() +} + +/************************************/ +/*********** FLOW CONTROL ***********/ +/************************************/ + +// Next should be used only inside middleware. +// It executes the pending handlers in the chain inside the calling handler. +// See example in GitHub. +func (c *Context) Next() { + c.index++ + for s := int8(len(c.handlers)); c.index < s; c.index++ { + c.handlers[c.index](c) + } +} + +// IsAborted returns true if the current context was aborted. +func (c *Context) IsAborted() bool { + return c.index >= abortIndex +} + +// Abort prevents pending handlers from being called. Note that this will not stop the current handler. +// Let's say you have an authorization middleware that validates that the current request is authorized. +// If the authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers +// for this request are not called. +func (c *Context) Abort() { + c.index = abortIndex +} + +// AbortWithStatus calls `Abort()` and writes the headers with the specified status code. +// For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401). +func (c *Context) AbortWithStatus(code int) { + c.Status(code) + c.Writer.WriteHeaderNow() + c.Abort() +} + +// AbortWithStatusJSON calls `Abort()` and then `JSON` internally. +// This method stops the chain, writes the status code and return a JSON body. +// It also sets the Content-Type as "application/json". +func (c *Context) AbortWithStatusJSON(code int, jsonObj interface{}) { + c.Abort() + c.JSON(code, jsonObj) +} + +// AbortWithError calls `AbortWithStatus()` and `Error()` internally. +// This method stops the chain, writes the status code and pushes the specified error to `c.Errors`. +// See Context.Error() for more details. +func (c *Context) AbortWithError(code int, err error) *Error { + c.AbortWithStatus(code) + return c.Error(err) +} + +/************************************/ +/********* ERROR MANAGEMENT *********/ +/************************************/ + +// Error attaches an error to the current context. The error is pushed to a list of errors. +// It's a good idea to call Error for each error that occurred during the resolution of a request. +// A middleware can be used to collect all the errors and push them to a database together, +// print a log, or append it in the HTTP response. +// Error will panic if err is nil. +func (c *Context) Error(err error) *Error { + if err == nil { + panic("err is nil") + } + var parsedError *Error + switch err.(type) { + case *Error: + parsedError = err.(*Error) + default: + parsedError = &Error{ + Err: err, + Type: ErrorTypePrivate, + } + } + c.Errors = append(c.Errors, parsedError) + return parsedError +} + +/************************************/ +/******** METADATA MANAGEMENT********/ +/************************************/ + +// Set is used to store a new key/value pair exclusively for this context. +// It also lazy initializes c.Keys if it was not used previously. +func (c *Context) Set(key string, value interface{}) { + if c.Keys == nil { + c.Keys = make(map[string]interface{}) + } + c.Keys[key] = value +} + +// Get returns the value for the given key, ie: (value, true). +// If the value does not exists it returns (nil, false) +func (c *Context) Get(key string) (value interface{}, exists bool) { + value, exists = c.Keys[key] + return +} + +// MustGet returns the value for the given key if it exists, otherwise it panics. 
+func (c *Context) MustGet(key string) interface{} { + if value, exists := c.Get(key); exists { + return value + } + panic("Key \"" + key + "\" does not exist") +} + +// GetString returns the value associated with the key as a string. +func (c *Context) GetString(key string) (s string) { + if val, ok := c.Get(key); ok && val != nil { + s, _ = val.(string) + } + return +} + +// GetBool returns the value associated with the key as a boolean. +func (c *Context) GetBool(key string) (b bool) { + if val, ok := c.Get(key); ok && val != nil { + b, _ = val.(bool) + } + return +} + +// GetInt returns the value associated with the key as an integer. +func (c *Context) GetInt(key string) (i int) { + if val, ok := c.Get(key); ok && val != nil { + i, _ = val.(int) + } + return +} + +// GetInt64 returns the value associated with the key as an integer. +func (c *Context) GetInt64(key string) (i64 int64) { + if val, ok := c.Get(key); ok && val != nil { + i64, _ = val.(int64) + } + return +} + +// GetFloat64 returns the value associated with the key as a float64. +func (c *Context) GetFloat64(key string) (f64 float64) { + if val, ok := c.Get(key); ok && val != nil { + f64, _ = val.(float64) + } + return +} + +// GetTime returns the value associated with the key as time. +func (c *Context) GetTime(key string) (t time.Time) { + if val, ok := c.Get(key); ok && val != nil { + t, _ = val.(time.Time) + } + return +} + +// GetDuration returns the value associated with the key as a duration. +func (c *Context) GetDuration(key string) (d time.Duration) { + if val, ok := c.Get(key); ok && val != nil { + d, _ = val.(time.Duration) + } + return +} + +// GetStringSlice returns the value associated with the key as a slice of strings. +func (c *Context) GetStringSlice(key string) (ss []string) { + if val, ok := c.Get(key); ok && val != nil { + ss, _ = val.([]string) + } + return +} + +// GetStringMap returns the value associated with the key as a map of interfaces. +func (c *Context) GetStringMap(key string) (sm map[string]interface{}) { + if val, ok := c.Get(key); ok && val != nil { + sm, _ = val.(map[string]interface{}) + } + return +} + +// GetStringMapString returns the value associated with the key as a map of strings. +func (c *Context) GetStringMapString(key string) (sms map[string]string) { + if val, ok := c.Get(key); ok && val != nil { + sms, _ = val.(map[string]string) + } + return +} + +// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. +func (c *Context) GetStringMapStringSlice(key string) (smss map[string][]string) { + if val, ok := c.Get(key); ok && val != nil { + smss, _ = val.(map[string][]string) + } + return +} + +/************************************/ +/************ INPUT DATA ************/ +/************************************/ + +// Param returns the value of the URL param. +// It is a shortcut for c.Params.ByName(key) +// router.GET("/user/:id", func(c *gin.Context) { +// // a GET request to /user/john +// id := c.Param("id") // id == "john" +// }) +func (c *Context) Param(key string) string { + return c.Params.ByName(key) +} + +// Query returns the keyed url query value if it exists, +// otherwise it returns an empty string `("")`. 
+// It is shortcut for `c.Request.URL.Query().Get(key)` +// GET /path?id=1234&name=Manu&value= +// c.Query("id") == "1234" +// c.Query("name") == "Manu" +// c.Query("value") == "" +// c.Query("wtf") == "" +func (c *Context) Query(key string) string { + value, _ := c.GetQuery(key) + return value +} + +// DefaultQuery returns the keyed url query value if it exists, +// otherwise it returns the specified defaultValue string. +// See: Query() and GetQuery() for further information. +// GET /?name=Manu&lastname= +// c.DefaultQuery("name", "unknown") == "Manu" +// c.DefaultQuery("id", "none") == "none" +// c.DefaultQuery("lastname", "none") == "" +func (c *Context) DefaultQuery(key, defaultValue string) string { + if value, ok := c.GetQuery(key); ok { + return value + } + return defaultValue +} + +// GetQuery is like Query(), it returns the keyed url query value +// if it exists `(value, true)` (even when the value is an empty string), +// otherwise it returns `("", false)`. +// It is shortcut for `c.Request.URL.Query().Get(key)` +// GET /?name=Manu&lastname= +// ("Manu", true) == c.GetQuery("name") +// ("", false) == c.GetQuery("id") +// ("", true) == c.GetQuery("lastname") +func (c *Context) GetQuery(key string) (string, bool) { + if values, ok := c.GetQueryArray(key); ok { + return values[0], ok + } + return "", false +} + +// QueryArray returns a slice of strings for a given query key. +// The length of the slice depends on the number of params with the given key. +func (c *Context) QueryArray(key string) []string { + values, _ := c.GetQueryArray(key) + return values +} + +// GetQueryArray returns a slice of strings for a given query key, plus +// a boolean value whether at least one value exists for the given key. +func (c *Context) GetQueryArray(key string) ([]string, bool) { + if values, ok := c.Request.URL.Query()[key]; ok && len(values) > 0 { + return values, true + } + return []string{}, false +} + +// PostForm returns the specified key from a POST urlencoded form or multipart form +// when it exists, otherwise it returns an empty string `("")`. +func (c *Context) PostForm(key string) string { + value, _ := c.GetPostForm(key) + return value +} + +// DefaultPostForm returns the specified key from a POST urlencoded form or multipart form +// when it exists, otherwise it returns the specified defaultValue string. +// See: PostForm() and GetPostForm() for further information. +func (c *Context) DefaultPostForm(key, defaultValue string) string { + if value, ok := c.GetPostForm(key); ok { + return value + } + return defaultValue +} + +// GetPostForm is like PostForm(key). It returns the specified key from a POST urlencoded +// form or multipart form when it exists `(value, true)` (even when the value is an empty string), +// otherwise it returns ("", false). +// For example, during a PATCH request to update the user's email: +// email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com" +// email= --> ("", true) := GetPostForm("email") // set email to "" +// --> ("", false) := GetPostForm("email") // do nothing with email +func (c *Context) GetPostForm(key string) (string, bool) { + if values, ok := c.GetPostFormArray(key); ok { + return values[0], ok + } + return "", false +} + +// PostFormArray returns a slice of strings for a given form key. +// The length of the slice depends on the number of params with the given key. 
+func (c *Context) PostFormArray(key string) []string { + values, _ := c.GetPostFormArray(key) + return values +} + +// GetPostFormArray returns a slice of strings for a given form key, plus +// a boolean value whether at least one value exists for the given key. +func (c *Context) GetPostFormArray(key string) ([]string, bool) { + req := c.Request + req.ParseForm() + req.ParseMultipartForm(c.engine.MaxMultipartMemory) + if values := req.PostForm[key]; len(values) > 0 { + return values, true + } + if req.MultipartForm != nil && req.MultipartForm.File != nil { + if values := req.MultipartForm.Value[key]; len(values) > 0 { + return values, true + } + } + return []string{}, false +} + +// FormFile returns the first file for the provided form key. +func (c *Context) FormFile(name string) (*multipart.FileHeader, error) { + _, fh, err := c.Request.FormFile(name) + return fh, err +} + +// MultipartForm is the parsed multipart form, including file uploads. +func (c *Context) MultipartForm() (*multipart.Form, error) { + err := c.Request.ParseMultipartForm(c.engine.MaxMultipartMemory) + return c.Request.MultipartForm, err +} + +// SaveUploadedFile uploads the form file to specific dst. +func (c *Context) SaveUploadedFile(file *multipart.FileHeader, dst string) error { + src, err := file.Open() + if err != nil { + return err + } + defer src.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + + io.Copy(out, src) + return nil +} + +// Bind checks the Content-Type to select a binding engine automatically, +// Depending the "Content-Type" header different bindings are used: +// "application/json" --> JSON binding +// "application/xml" --> XML binding +// otherwise --> returns an error. +// It parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. +// It decodes the json payload into the struct specified as a pointer. +// It writes a 400 error and sets Content-Type header "text/plain" in the response if input is not valid. +func (c *Context) Bind(obj interface{}) error { + b := binding.Default(c.Request.Method, c.ContentType()) + return c.MustBindWith(obj, b) +} + +// BindJSON is a shortcut for c.MustBindWith(obj, binding.JSON). +func (c *Context) BindJSON(obj interface{}) error { + return c.MustBindWith(obj, binding.JSON) +} + +// BindQuery is a shortcut for c.MustBindWith(obj, binding.Query). +func (c *Context) BindQuery(obj interface{}) error { + return c.MustBindWith(obj, binding.Query) +} + +// MustBindWith binds the passed struct pointer using the specified binding engine. +// It will abort the request with HTTP 400 if any error ocurrs. +// See the binding package. +func (c *Context) MustBindWith(obj interface{}, b binding.Binding) (err error) { + if err = c.ShouldBindWith(obj, b); err != nil { + c.AbortWithError(400, err).SetType(ErrorTypeBind) + } + + return +} + +// ShouldBind checks the Content-Type to select a binding engine automatically, +// Depending the "Content-Type" header different bindings are used: +// "application/json" --> JSON binding +// "application/xml" --> XML binding +// otherwise --> returns an error +// It parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. +// It decodes the json payload into the struct specified as a pointer. +// Like c.Bind() but this method does not set the response status code to 400 and abort if the json is not valid. 
+func (c *Context) ShouldBind(obj interface{}) error {
+	b := binding.Default(c.Request.Method, c.ContentType())
+	return c.ShouldBindWith(obj, b)
+}
+
+// ShouldBindJSON is a shortcut for c.ShouldBindWith(obj, binding.JSON).
+func (c *Context) ShouldBindJSON(obj interface{}) error {
+	return c.ShouldBindWith(obj, binding.JSON)
+}
+
+// ShouldBindQuery is a shortcut for c.ShouldBindWith(obj, binding.Query).
+func (c *Context) ShouldBindQuery(obj interface{}) error {
+	return c.ShouldBindWith(obj, binding.Query)
+}
+
+// ShouldBindWith binds the passed struct pointer using the specified binding engine.
+// See the binding package.
+func (c *Context) ShouldBindWith(obj interface{}, b binding.Binding) error {
+	return b.Bind(c.Request, obj)
+}
+
+// ClientIP implements a best-effort algorithm to return the real client IP. It parses
+// X-Real-IP and X-Forwarded-For in order to work properly with reverse proxies such as nginx or HAProxy.
+// X-Forwarded-For is checked before X-Real-Ip because nginx sets X-Real-Ip to the proxy's IP.
+func (c *Context) ClientIP() string {
+	if c.engine.ForwardedByClientIP {
+		clientIP := c.requestHeader("X-Forwarded-For")
+		if index := strings.IndexByte(clientIP, ','); index >= 0 {
+			clientIP = clientIP[0:index]
+		}
+		clientIP = strings.TrimSpace(clientIP)
+		if clientIP != "" {
+			return clientIP
+		}
+		clientIP = strings.TrimSpace(c.requestHeader("X-Real-Ip"))
+		if clientIP != "" {
+			return clientIP
+		}
+	}
+
+	if c.engine.AppEngine {
+		if addr := c.requestHeader("X-Appengine-Remote-Addr"); addr != "" {
+			return addr
+		}
+	}
+
+	if ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr)); err == nil {
+		return ip
+	}
+
+	return ""
+}
+
+// ContentType returns the Content-Type header of the request.
+func (c *Context) ContentType() string {
+	return filterFlags(c.requestHeader("Content-Type"))
+}
+
+// IsWebsocket returns true if the request headers indicate that a websocket
+// handshake is being initiated by the client.
+func (c *Context) IsWebsocket() bool {
+	if strings.Contains(strings.ToLower(c.requestHeader("Connection")), "upgrade") &&
+		strings.ToLower(c.requestHeader("Upgrade")) == "websocket" {
+		return true
+	}
+	return false
+}
+
+func (c *Context) requestHeader(key string) string {
+	return c.Request.Header.Get(key)
+}
+
+/************************************/
+/******** RESPONSE RENDERING ********/
+/************************************/
+
+// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function.
+func bodyAllowedForStatus(status int) bool {
+	switch {
+	case status >= 100 && status <= 199:
+		return false
+	case status == 204:
+		return false
+	case status == 304:
+		return false
+	}
+	return true
+}
+
+// Status sets the HTTP response code.
+func (c *Context) Status(code int) {
+	c.writermem.WriteHeader(code)
+}
+
+// Header is an intelligent shortcut for c.Writer.Header().Set(key, value).
+// It writes a header in the response.
+// If value == "", this method removes the header via `c.Writer.Header().Del(key)`.
+func (c *Context) Header(key, value string) {
+	if value == "" {
+		c.Writer.Header().Del(key)
+	} else {
+		c.Writer.Header().Set(key, value)
+	}
+}
+
+// GetHeader returns the value of the given request header.
+func (c *Context) GetHeader(key string) string {
+	return c.requestHeader(key)
+}
+
+// GetRawData returns the raw request body data.
+func (c *Context) GetRawData() ([]byte, error) {
+	return ioutil.ReadAll(c.Request.Body)
+}
+
+// SetCookie adds a Set-Cookie header to the ResponseWriter's headers.
+// The provided cookie must have a valid Name. Invalid cookies may be
+// silently dropped.
+func (c *Context) SetCookie(name, value string, maxAge int, path, domain string, secure, httpOnly bool) {
+	if path == "" {
+		path = "/"
+	}
+	http.SetCookie(c.Writer, &http.Cookie{
+		Name:     name,
+		Value:    url.QueryEscape(value),
+		MaxAge:   maxAge,
+		Path:     path,
+		Domain:   domain,
+		Secure:   secure,
+		HttpOnly: httpOnly,
+	})
+}
+
+// Cookie returns the named cookie provided in the request or
+// ErrNoCookie if not found. The returned cookie value is unescaped.
+// If multiple cookies match the given name, only one cookie will
+// be returned.
+func (c *Context) Cookie(name string) (string, error) {
+	cookie, err := c.Request.Cookie(name)
+	if err != nil {
+		return "", err
+	}
+	val, _ := url.QueryUnescape(cookie.Value)
+	return val, nil
+}
+
+func (c *Context) Render(code int, r render.Render) {
+	c.Status(code)
+
+	if !bodyAllowedForStatus(code) {
+		r.WriteContentType(c.Writer)
+		c.Writer.WriteHeaderNow()
+		return
+	}
+
+	if err := r.Render(c.Writer); err != nil {
+		panic(err)
+	}
+}
+
+// HTML renders the HTTP template specified by its file name.
+// It also updates the HTTP code and sets the Content-Type as "text/html".
+// See http://golang.org/doc/articles/wiki/
+func (c *Context) HTML(code int, name string, obj interface{}) {
+	instance := c.engine.HTMLRender.Instance(name, obj)
+	c.Render(code, instance)
+}
+
+// IndentedJSON serializes the given struct as pretty JSON (indented + endlines) into the response body.
+// It also sets the Content-Type as "application/json".
+// WARNING: we recommend using this only for development purposes since printing pretty JSON is
+// more CPU- and bandwidth-intensive. Use Context.JSON() instead.
+func (c *Context) IndentedJSON(code int, obj interface{}) {
+	c.Render(code, render.IndentedJSON{Data: obj})
+}
+
+// SecureJSON serializes the given struct as Secure JSON into the response body.
+// By default it prepends "while(1)," to the response body if the given struct is array values.
+// It also sets the Content-Type as "application/json".
+func (c *Context) SecureJSON(code int, obj interface{}) {
+	c.Render(code, render.SecureJSON{Prefix: c.engine.secureJsonPrefix, Data: obj})
+}
+
+// JSON serializes the given struct as JSON into the response body.
+// It also sets the Content-Type as "application/json".
+func (c *Context) JSON(code int, obj interface{}) {
+	c.Render(code, render.JSON{Data: obj})
+}
+
+// XML serializes the given struct as XML into the response body.
+// It also sets the Content-Type as "application/xml".
+func (c *Context) XML(code int, obj interface{}) {
+	c.Render(code, render.XML{Data: obj})
+}
+
+// YAML serializes the given struct as YAML into the response body.
+func (c *Context) YAML(code int, obj interface{}) {
+	c.Render(code, render.YAML{Data: obj})
+}
+
+// String writes the given string into the response body.
+func (c *Context) String(code int, format string, values ...interface{}) {
+	c.Render(code, render.String{Format: format, Data: values})
+}
+
+// Redirect returns an HTTP redirect to the specified location.
+func (c *Context) Redirect(code int, location string) {
+	c.Render(-1, render.Redirect{
+		Code:     code,
+		Location: location,
+		Request:  c.Request,
+	})
+}
+
+// Data writes some data into the body stream and updates the HTTP code.
+func (c *Context) Data(code int, contentType string, data []byte) { + c.Render(code, render.Data{ + ContentType: contentType, + Data: data, + }) +} + +// File writes the specified file into the body stream in a efficient way. +func (c *Context) File(filepath string) { + http.ServeFile(c.Writer, c.Request, filepath) +} + +// SSEvent writes a Server-Sent Event into the body stream. +func (c *Context) SSEvent(name string, message interface{}) { + c.Render(-1, sse.Event{ + Event: name, + Data: message, + }) +} + +func (c *Context) Stream(step func(w io.Writer) bool) { + w := c.Writer + clientGone := w.CloseNotify() + for { + select { + case <-clientGone: + return + default: + keepOpen := step(w) + w.Flush() + if !keepOpen { + return + } + } + } +} + +/************************************/ +/******** CONTENT NEGOTIATION *******/ +/************************************/ + +type Negotiate struct { + Offered []string + HTMLName string + HTMLData interface{} + JSONData interface{} + XMLData interface{} + Data interface{} +} + +func (c *Context) Negotiate(code int, config Negotiate) { + switch c.NegotiateFormat(config.Offered...) { + case binding.MIMEJSON: + data := chooseData(config.JSONData, config.Data) + c.JSON(code, data) + + case binding.MIMEHTML: + data := chooseData(config.HTMLData, config.Data) + c.HTML(code, config.HTMLName, data) + + case binding.MIMEXML: + data := chooseData(config.XMLData, config.Data) + c.XML(code, data) + + default: + c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server")) + } +} + +func (c *Context) NegotiateFormat(offered ...string) string { + assert1(len(offered) > 0, "you must provide at least one offer") + + if c.Accepted == nil { + c.Accepted = parseAccept(c.requestHeader("Accept")) + } + if len(c.Accepted) == 0 { + return offered[0] + } + for _, accepted := range c.Accepted { + for _, offert := range offered { + if accepted == offert { + return offert + } + } + } + return "" +} + +func (c *Context) SetAccepted(formats ...string) { + c.Accepted = formats +} + +/************************************/ +/***** GOLANG.ORG/X/NET/CONTEXT *****/ +/************************************/ + +func (c *Context) Deadline() (deadline time.Time, ok bool) { + return +} + +func (c *Context) Done() <-chan struct{} { + return nil +} + +func (c *Context) Err() error { + return nil +} + +func (c *Context) Value(key interface{}) interface{} { + if key == 0 { + return c.Request + } + if keyAsString, ok := key.(string); ok { + val, _ := c.Get(keyAsString) + return val + } + return nil +} diff --git a/vendor/github.com/gin-gonic/gin/context_appengine.go b/vendor/github.com/gin-gonic/gin/context_appengine.go new file mode 100644 index 000000000..38c189a0b --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/context_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +func init() { + defaultAppEngine = true +} diff --git a/vendor/github.com/gin-gonic/gin/context_test.go b/vendor/github.com/gin-gonic/gin/context_test.go new file mode 100644 index 000000000..9024cfc12 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/context_test.go @@ -0,0 +1,1379 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package gin + +import ( + "bytes" + "errors" + "fmt" + "html/template" + "mime/multipart" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" + "time" + + "github.com/gin-contrib/sse" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +var _ context.Context = &Context{} + +// Unit tests TODO +// func (c *Context) File(filepath string) { +// func (c *Context) Negotiate(code int, config Negotiate) { +// BAD case: func (c *Context) Render(code int, render render.Render, obj ...interface{}) { +// test that information is not leaked when reusing Contexts (using the Pool) + +func createMultipartRequest() *http.Request { + boundary := "--testboundary" + body := new(bytes.Buffer) + mw := multipart.NewWriter(body) + defer mw.Close() + + must(mw.SetBoundary(boundary)) + must(mw.WriteField("foo", "bar")) + must(mw.WriteField("bar", "10")) + must(mw.WriteField("bar", "foo2")) + must(mw.WriteField("array", "first")) + must(mw.WriteField("array", "second")) + must(mw.WriteField("id", "")) + must(mw.WriteField("time_local", "31/12/2016 14:55")) + must(mw.WriteField("time_utc", "31/12/2016 14:55")) + must(mw.WriteField("time_location", "31/12/2016 14:55")) + req, err := http.NewRequest("POST", "/", body) + must(err) + req.Header.Set("Content-Type", MIMEMultipartPOSTForm+"; boundary="+boundary) + return req +} + +func must(err error) { + if err != nil { + panic(err.Error()) + } +} + +func TestContextFormFile(t *testing.T) { + buf := new(bytes.Buffer) + mw := multipart.NewWriter(buf) + w, err := mw.CreateFormFile("file", "test") + if assert.NoError(t, err) { + w.Write([]byte("test")) + } + mw.Close() + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "/", buf) + c.Request.Header.Set("Content-Type", mw.FormDataContentType()) + f, err := c.FormFile("file") + if assert.NoError(t, err) { + assert.Equal(t, "test", f.Filename) + } + + assert.NoError(t, c.SaveUploadedFile(f, "test")) +} + +func TestContextMultipartForm(t *testing.T) { + buf := new(bytes.Buffer) + mw := multipart.NewWriter(buf) + mw.WriteField("foo", "bar") + w, err := mw.CreateFormFile("file", "test") + if assert.NoError(t, err) { + w.Write([]byte("test")) + } + mw.Close() + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "/", buf) + c.Request.Header.Set("Content-Type", mw.FormDataContentType()) + f, err := c.MultipartForm() + if assert.NoError(t, err) { + assert.NotNil(t, f) + } + + assert.NoError(t, c.SaveUploadedFile(f.File["file"][0], "test")) +} + +func TestSaveUploadedOpenFailed(t *testing.T) { + buf := new(bytes.Buffer) + mw := multipart.NewWriter(buf) + mw.Close() + + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "/", buf) + c.Request.Header.Set("Content-Type", mw.FormDataContentType()) + + f := &multipart.FileHeader{ + Filename: "file", + } + assert.Error(t, c.SaveUploadedFile(f, "test")) +} + +func TestSaveUploadedCreateFailed(t *testing.T) { + buf := new(bytes.Buffer) + mw := multipart.NewWriter(buf) + w, err := mw.CreateFormFile("file", "test") + if assert.NoError(t, err) { + w.Write([]byte("test")) + } + mw.Close() + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "/", buf) + c.Request.Header.Set("Content-Type", mw.FormDataContentType()) + f, err := c.FormFile("file") + if assert.NoError(t, err) { + assert.Equal(t, "test", f.Filename) + } + + assert.Error(t, c.SaveUploadedFile(f, "/")) +} + +func TestContextReset(t 
*testing.T) { + router := New() + c := router.allocateContext() + assert.Equal(t, c.engine, router) + + c.index = 2 + c.Writer = &responseWriter{ResponseWriter: httptest.NewRecorder()} + c.Params = Params{Param{}} + c.Error(errors.New("test")) + c.Set("foo", "bar") + c.reset() + + assert.False(t, c.IsAborted()) + assert.Nil(t, c.Keys) + assert.Nil(t, c.Accepted) + assert.Len(t, c.Errors, 0) + assert.Empty(t, c.Errors.Errors()) + assert.Empty(t, c.Errors.ByType(ErrorTypeAny)) + assert.Len(t, c.Params, 0) + assert.EqualValues(t, c.index, -1) + assert.Equal(t, c.Writer.(*responseWriter), &c.writermem) +} + +func TestContextHandlers(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + assert.Nil(t, c.handlers) + assert.Nil(t, c.handlers.Last()) + + c.handlers = HandlersChain{} + assert.NotNil(t, c.handlers) + assert.Nil(t, c.handlers.Last()) + + f := func(c *Context) {} + g := func(c *Context) {} + + c.handlers = HandlersChain{f} + compareFunc(t, f, c.handlers.Last()) + + c.handlers = HandlersChain{f, g} + compareFunc(t, g, c.handlers.Last()) +} + +// TestContextSetGet tests that a parameter is set correctly on the +// current context and can be retrieved using Get. +func TestContextSetGet(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Set("foo", "bar") + + value, err := c.Get("foo") + assert.Equal(t, "bar", value) + assert.True(t, err) + + value, err = c.Get("foo2") + assert.Nil(t, value) + assert.False(t, err) + + assert.Equal(t, "bar", c.MustGet("foo")) + assert.Panics(t, func() { c.MustGet("no_exist") }) +} + +func TestContextSetGetValues(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Set("string", "this is a string") + c.Set("int32", int32(-42)) + c.Set("int64", int64(42424242424242)) + c.Set("uint64", uint64(42)) + c.Set("float32", float32(4.2)) + c.Set("float64", 4.2) + var a interface{} = 1 + c.Set("intInterface", a) + + assert.Exactly(t, c.MustGet("string").(string), "this is a string") + assert.Exactly(t, c.MustGet("int32").(int32), int32(-42)) + assert.Exactly(t, c.MustGet("int64").(int64), int64(42424242424242)) + assert.Exactly(t, c.MustGet("uint64").(uint64), uint64(42)) + assert.Exactly(t, c.MustGet("float32").(float32), float32(4.2)) + assert.Exactly(t, c.MustGet("float64").(float64), 4.2) + assert.Exactly(t, c.MustGet("intInterface").(int), 1) + +} + +func TestContextGetString(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Set("string", "this is a string") + assert.Equal(t, "this is a string", c.GetString("string")) +} + +func TestContextSetGetBool(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Set("bool", true) + assert.True(t, c.GetBool("bool")) +} + +func TestContextGetInt(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Set("int", 1) + assert.Equal(t, 1, c.GetInt("int")) +} + +func TestContextGetInt64(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Set("int64", int64(42424242424242)) + assert.Equal(t, int64(42424242424242), c.GetInt64("int64")) +} + +func TestContextGetFloat64(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Set("float64", 4.2) + assert.Equal(t, 4.2, c.GetFloat64("float64")) +} + +func TestContextGetTime(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + t1, _ := time.Parse("1/2/2006 15:04:05", "01/01/2017 12:00:00") + c.Set("time", t1) + assert.Equal(t, t1, c.GetTime("time")) +} + +func TestContextGetDuration(t *testing.T) { + c, _ := 
CreateTestContext(httptest.NewRecorder()) + c.Set("duration", time.Second) + assert.Equal(t, time.Second, c.GetDuration("duration")) +} + +func TestContextGetStringSlice(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Set("slice", []string{"foo"}) + assert.Equal(t, []string{"foo"}, c.GetStringSlice("slice")) +} + +func TestContextGetStringMap(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + var m = make(map[string]interface{}) + m["foo"] = 1 + c.Set("map", m) + + assert.Equal(t, m, c.GetStringMap("map")) + assert.Equal(t, 1, c.GetStringMap("map")["foo"]) +} + +func TestContextGetStringMapString(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + var m = make(map[string]string) + m["foo"] = "bar" + c.Set("map", m) + + assert.Equal(t, m, c.GetStringMapString("map")) + assert.Equal(t, "bar", c.GetStringMapString("map")["foo"]) +} + +func TestContextGetStringMapStringSlice(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + var m = make(map[string][]string) + m["foo"] = []string{"foo"} + c.Set("map", m) + + assert.Equal(t, m, c.GetStringMapStringSlice("map")) + assert.Equal(t, []string{"foo"}, c.GetStringMapStringSlice("map")["foo"]) +} + +func TestContextCopy(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.index = 2 + c.Request, _ = http.NewRequest("POST", "/hola", nil) + c.handlers = HandlersChain{func(c *Context) {}} + c.Params = Params{Param{Key: "foo", Value: "bar"}} + c.Set("foo", "bar") + + cp := c.Copy() + assert.Nil(t, cp.handlers) + assert.Nil(t, cp.writermem.ResponseWriter) + assert.Equal(t, &cp.writermem, cp.Writer.(*responseWriter)) + assert.Equal(t, cp.Request, c.Request) + assert.Equal(t, cp.index, abortIndex) + assert.Equal(t, cp.Keys, c.Keys) + assert.Equal(t, cp.engine, c.engine) + assert.Equal(t, cp.Params, c.Params) +} + +func TestContextHandlerName(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.handlers = HandlersChain{func(c *Context) {}, handlerNameTest} + + assert.Regexp(t, "^(.*/vendor/)?github.com/gin-gonic/gin.handlerNameTest$", c.HandlerName()) +} + +func handlerNameTest(c *Context) { + +} + +var handlerTest HandlerFunc = func(c *Context) { + +} + +func TestContextHandler(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.handlers = HandlersChain{func(c *Context) {}, handlerTest} + + assert.Equal(t, reflect.ValueOf(handlerTest).Pointer(), reflect.ValueOf(c.Handler()).Pointer()) +} + +func TestContextQuery(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("GET", "http://example.com/?foo=bar&page=10&id=", nil) + + value, ok := c.GetQuery("foo") + assert.True(t, ok) + assert.Equal(t, "bar", value) + assert.Equal(t, "bar", c.DefaultQuery("foo", "none")) + assert.Equal(t, "bar", c.Query("foo")) + + value, ok = c.GetQuery("page") + assert.True(t, ok) + assert.Equal(t, "10", value) + assert.Equal(t, "10", c.DefaultQuery("page", "0")) + assert.Equal(t, "10", c.Query("page")) + + value, ok = c.GetQuery("id") + assert.True(t, ok) + assert.Empty(t, value) + assert.Empty(t, c.DefaultQuery("id", "nada")) + assert.Empty(t, c.Query("id")) + + value, ok = c.GetQuery("NoKey") + assert.False(t, ok) + assert.Empty(t, value) + assert.Equal(t, "nada", c.DefaultQuery("NoKey", "nada")) + assert.Empty(t, c.Query("NoKey")) + + // postform should not mess + value, ok = c.GetPostForm("page") + assert.False(t, ok) + assert.Empty(t, value) + assert.Empty(t, c.PostForm("foo")) +} + +func 
TestContextQueryAndPostForm(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + body := bytes.NewBufferString("foo=bar&page=11&both=&foo=second") + c.Request, _ = http.NewRequest("POST", "/?both=GET&id=main&id=omit&array[]=first&array[]=second", body) + c.Request.Header.Add("Content-Type", MIMEPOSTForm) + + assert.Equal(t, "bar", c.DefaultPostForm("foo", "none")) + assert.Equal(t, "bar", c.PostForm("foo")) + assert.Empty(t, c.Query("foo")) + + value, ok := c.GetPostForm("page") + assert.True(t, ok) + assert.Equal(t, "11", value) + assert.Equal(t, "11", c.DefaultPostForm("page", "0")) + assert.Equal(t, "11", c.PostForm("page")) + assert.Empty(t, c.Query("page")) + + value, ok = c.GetPostForm("both") + assert.True(t, ok) + assert.Empty(t, value) + assert.Empty(t, c.PostForm("both")) + assert.Empty(t, c.DefaultPostForm("both", "nothing")) + assert.Equal(t, "GET", c.Query("both"), "GET") + + value, ok = c.GetQuery("id") + assert.True(t, ok) + assert.Equal(t, "main", value) + assert.Equal(t, "000", c.DefaultPostForm("id", "000")) + assert.Equal(t, "main", c.Query("id")) + assert.Empty(t, c.PostForm("id")) + + value, ok = c.GetQuery("NoKey") + assert.False(t, ok) + assert.Empty(t, value) + value, ok = c.GetPostForm("NoKey") + assert.False(t, ok) + assert.Empty(t, value) + assert.Equal(t, "nada", c.DefaultPostForm("NoKey", "nada")) + assert.Equal(t, "nothing", c.DefaultQuery("NoKey", "nothing")) + assert.Empty(t, c.PostForm("NoKey")) + assert.Empty(t, c.Query("NoKey")) + + var obj struct { + Foo string `form:"foo"` + ID string `form:"id"` + Page int `form:"page"` + Both string `form:"both"` + Array []string `form:"array[]"` + } + assert.NoError(t, c.Bind(&obj)) + assert.Equal(t, "bar", obj.Foo, "bar") + assert.Equal(t, "main", obj.ID, "main") + assert.Equal(t, 11, obj.Page, 11) + assert.Empty(t, obj.Both) + assert.Equal(t, []string{"first", "second"}, obj.Array) + + values, ok := c.GetQueryArray("array[]") + assert.True(t, ok) + assert.Equal(t, "first", values[0]) + assert.Equal(t, "second", values[1]) + + values = c.QueryArray("array[]") + assert.Equal(t, "first", values[0]) + assert.Equal(t, "second", values[1]) + + values = c.QueryArray("nokey") + assert.Equal(t, 0, len(values)) + + values = c.QueryArray("both") + assert.Equal(t, 1, len(values)) + assert.Equal(t, "GET", values[0]) +} + +func TestContextPostFormMultipart(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request = createMultipartRequest() + + var obj struct { + Foo string `form:"foo"` + Bar string `form:"bar"` + BarAsInt int `form:"bar"` + Array []string `form:"array"` + ID string `form:"id"` + TimeLocal time.Time `form:"time_local" time_format:"02/01/2006 15:04"` + TimeUTC time.Time `form:"time_utc" time_format:"02/01/2006 15:04" time_utc:"1"` + TimeLocation time.Time `form:"time_location" time_format:"02/01/2006 15:04" time_location:"Asia/Tokyo"` + BlankTime time.Time `form:"blank_time" time_format:"02/01/2006 15:04"` + } + assert.NoError(t, c.Bind(&obj)) + assert.Equal(t, "bar", obj.Foo) + assert.Equal(t, "10", obj.Bar) + assert.Equal(t, 10, obj.BarAsInt) + assert.Equal(t, []string{"first", "second"}, obj.Array) + assert.Empty(t, obj.ID) + assert.Equal(t, "31/12/2016 14:55", obj.TimeLocal.Format("02/01/2006 15:04")) + assert.Equal(t, time.Local, obj.TimeLocal.Location()) + assert.Equal(t, "31/12/2016 14:55", obj.TimeUTC.Format("02/01/2006 15:04")) + assert.Equal(t, time.UTC, obj.TimeUTC.Location()) + loc, _ := time.LoadLocation("Asia/Tokyo") + assert.Equal(t, "31/12/2016 14:55", 
obj.TimeLocation.Format("02/01/2006 15:04")) + assert.Equal(t, loc, obj.TimeLocation.Location()) + assert.True(t, obj.BlankTime.IsZero()) + + value, ok := c.GetQuery("foo") + assert.False(t, ok) + assert.Empty(t, value) + assert.Empty(t, c.Query("bar")) + assert.Equal(t, "nothing", c.DefaultQuery("id", "nothing")) + + value, ok = c.GetPostForm("foo") + assert.True(t, ok) + assert.Equal(t, "bar", value) + assert.Equal(t, "bar", c.PostForm("foo")) + + value, ok = c.GetPostForm("array") + assert.True(t, ok) + assert.Equal(t, "first", value) + assert.Equal(t, "first", c.PostForm("array")) + + assert.Equal(t, "10", c.DefaultPostForm("bar", "nothing")) + + value, ok = c.GetPostForm("id") + assert.True(t, ok) + assert.Empty(t, value) + assert.Empty(t, c.PostForm("id")) + assert.Empty(t, c.DefaultPostForm("id", "nothing")) + + value, ok = c.GetPostForm("nokey") + assert.False(t, ok) + assert.Empty(t, value) + assert.Equal(t, "nothing", c.DefaultPostForm("nokey", "nothing")) + + values, ok := c.GetPostFormArray("array") + assert.True(t, ok) + assert.Equal(t, "first", values[0]) + assert.Equal(t, "second", values[1]) + + values = c.PostFormArray("array") + assert.Equal(t, "first", values[0]) + assert.Equal(t, "second", values[1]) + + values = c.PostFormArray("nokey") + assert.Equal(t, 0, len(values)) + + values = c.PostFormArray("foo") + assert.Equal(t, 1, len(values)) + assert.Equal(t, "bar", values[0]) +} + +func TestContextSetCookie(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.SetCookie("user", "gin", 1, "/", "localhost", true, true) + assert.Equal(t, "user=gin; Path=/; Domain=localhost; Max-Age=1; HttpOnly; Secure", c.Writer.Header().Get("Set-Cookie")) +} + +func TestContextSetCookiePathEmpty(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.SetCookie("user", "gin", 1, "", "localhost", true, true) + assert.Equal(t, "user=gin; Path=/; Domain=localhost; Max-Age=1; HttpOnly; Secure", c.Writer.Header().Get("Set-Cookie")) +} + +func TestContextGetCookie(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("GET", "/get", nil) + c.Request.Header.Set("Cookie", "user=gin") + cookie, _ := c.Cookie("user") + assert.Equal(t, "gin", cookie) + + _, err := c.Cookie("nokey") + assert.Error(t, err) +} + +func TestContextBodyAllowedForStatus(t *testing.T) { + assert.False(t, false, bodyAllowedForStatus(102)) + assert.False(t, false, bodyAllowedForStatus(204)) + assert.False(t, false, bodyAllowedForStatus(304)) + assert.True(t, true, bodyAllowedForStatus(500)) +} + +type TestPanicRender struct { +} + +func (*TestPanicRender) Render(http.ResponseWriter) error { + return errors.New("TestPanicRender") +} + +func (*TestPanicRender) WriteContentType(http.ResponseWriter) {} + +func TestContextRenderPanicIfErr(t *testing.T) { + defer func() { + r := recover() + assert.Equal(t, "TestPanicRender", fmt.Sprint(r)) + }() + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Render(http.StatusOK, &TestPanicRender{}) + + assert.Fail(t, "Panic not detected") +} + +// Tests that the response is serialized as JSON +// and Content-Type is set to application/json +func TestContextRenderJSON(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.JSON(201, H{"foo": "bar"}) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "{\"foo\":\"bar\"}", w.Body.String()) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// Tests that no JSON is rendered if code is 204 
+func TestContextRenderNoContentJSON(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.JSON(204, H{"foo": "bar"}) + + assert.Equal(t, 204, w.Code) + assert.Empty(t, w.Body.String()) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// Tests that the response is serialized as JSON +// we change the content-type before +func TestContextRenderAPIJSON(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Header("Content-Type", "application/vnd.api+json") + c.JSON(201, H{"foo": "bar"}) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "{\"foo\":\"bar\"}", w.Body.String()) + assert.Equal(t, "application/vnd.api+json", w.HeaderMap.Get("Content-Type")) +} + +// Tests that no Custom JSON is rendered if code is 204 +func TestContextRenderNoContentAPIJSON(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Header("Content-Type", "application/vnd.api+json") + c.JSON(204, H{"foo": "bar"}) + + assert.Equal(t, 204, w.Code) + assert.Empty(t, w.Body.String()) + assert.Equal(t, w.HeaderMap.Get("Content-Type"), "application/vnd.api+json") +} + +// Tests that the response is serialized as JSON +// and Content-Type is set to application/json +func TestContextRenderIndentedJSON(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.IndentedJSON(201, H{"foo": "bar", "bar": "foo", "nested": H{"foo": "bar"}}) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "{\n \"bar\": \"foo\",\n \"foo\": \"bar\",\n \"nested\": {\n \"foo\": \"bar\"\n }\n}", w.Body.String()) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// Tests that no Custom JSON is rendered if code is 204 +func TestContextRenderNoContentIndentedJSON(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.IndentedJSON(204, H{"foo": "bar", "bar": "foo", "nested": H{"foo": "bar"}}) + + assert.Equal(t, 204, w.Code) + assert.Empty(t, w.Body.String()) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// Tests that the response is serialized as Secure JSON +// and Content-Type is set to application/json +func TestContextRenderSecureJSON(t *testing.T) { + w := httptest.NewRecorder() + c, router := CreateTestContext(w) + + router.SecureJsonPrefix("&&&START&&&") + c.SecureJSON(201, []string{"foo", "bar"}) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "&&&START&&&[\"foo\",\"bar\"]", w.Body.String()) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// Tests that no Custom JSON is rendered if code is 204 +func TestContextRenderNoContentSecureJSON(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.SecureJSON(204, []string{"foo", "bar"}) + + assert.Equal(t, 204, w.Code) + assert.Empty(t, w.Body.String()) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// Tests that the response executes the templates +// and responds with Content-Type set to text/html +func TestContextRenderHTML(t *testing.T) { + w := httptest.NewRecorder() + c, router := CreateTestContext(w) + + templ := template.Must(template.New("t").Parse(`Hello {{.name}}`)) + router.SetHTMLTemplate(templ) + + c.HTML(201, "t", H{"name": "alexandernyquist"}) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "Hello alexandernyquist", w.Body.String()) + assert.Equal(t, "text/html; charset=utf-8", 
w.HeaderMap.Get("Content-Type")) +} + +func TestContextRenderHTML2(t *testing.T) { + w := httptest.NewRecorder() + c, router := CreateTestContext(w) + + // print debug warning log when Engine.trees > 0 + router.addRoute("GET", "/", HandlersChain{func(_ *Context) {}}) + assert.Len(t, router.trees, 1) + + var b bytes.Buffer + setup(&b) + defer teardown() + + templ := template.Must(template.New("t").Parse(`Hello {{.name}}`)) + router.SetHTMLTemplate(templ) + + assert.Equal(t, "[GIN-debug] [WARNING] Since SetHTMLTemplate() is NOT thread-safe. It should only be called\nat initialization. ie. before any route is registered or the router is listening in a socket:\n\n\trouter := gin.Default()\n\trouter.SetHTMLTemplate(template) // << good place\n\n", b.String()) + + c.HTML(201, "t", H{"name": "alexandernyquist"}) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "Hello alexandernyquist", w.Body.String()) + assert.Equal(t, "text/html; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// Tests that no HTML is rendered if code is 204 +func TestContextRenderNoContentHTML(t *testing.T) { + w := httptest.NewRecorder() + c, router := CreateTestContext(w) + templ := template.Must(template.New("t").Parse(`Hello {{.name}}`)) + router.SetHTMLTemplate(templ) + + c.HTML(204, "t", H{"name": "alexandernyquist"}) + + assert.Equal(t, 204, w.Code) + assert.Empty(t, w.Body.String()) + assert.Equal(t, "text/html; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// TestContextXML tests that the response is serialized as XML +// and Content-Type is set to application/xml +func TestContextRenderXML(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.XML(201, H{"foo": "bar"}) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "bar", w.Body.String()) + assert.Equal(t, "application/xml; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// Tests that no XML is rendered if code is 204 +func TestContextRenderNoContentXML(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.XML(204, H{"foo": "bar"}) + + assert.Equal(t, 204, w.Code) + assert.Empty(t, w.Body.String()) + assert.Equal(t, "application/xml; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// TestContextString tests that the response is returned +// with Content-Type set to text/plain +func TestContextRenderString(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.String(201, "test %s %d", "string", 2) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "test string 2", w.Body.String()) + assert.Equal(t, "text/plain; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// Tests that no String is rendered if code is 204 +func TestContextRenderNoContentString(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.String(204, "test %s %d", "string", 2) + + assert.Equal(t, 204, w.Code) + assert.Empty(t, w.Body.String()) + assert.Equal(t, "text/plain; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// TestContextString tests that the response is returned +// with Content-Type set to text/html +func TestContextRenderHTMLString(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Header("Content-Type", "text/html; charset=utf-8") + c.String(201, "%s %d", "string", 3) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "string 3", w.Body.String()) + assert.Equal(t, "text/html; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// Tests that no HTML String is rendered if code is 204 +func 
TestContextRenderNoContentHTMLString(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Header("Content-Type", "text/html; charset=utf-8") + c.String(204, "%s %d", "string", 3) + + assert.Equal(t, 204, w.Code) + assert.Empty(t, w.Body.String()) + assert.Equal(t, "text/html; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// TestContextData tests that the response can be written from `bytesting` +// with specified MIME type +func TestContextRenderData(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Data(201, "text/csv", []byte(`foo,bar`)) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "foo,bar", w.Body.String()) + assert.Equal(t, "text/csv", w.HeaderMap.Get("Content-Type")) +} + +// Tests that no Custom Data is rendered if code is 204 +func TestContextRenderNoContentData(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Data(204, "text/csv", []byte(`foo,bar`)) + + assert.Equal(t, 204, w.Code) + assert.Empty(t, w.Body.String()) + assert.Equal(t, "text/csv", w.HeaderMap.Get("Content-Type")) +} + +func TestContextRenderSSE(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.SSEvent("float", 1.5) + c.Render(-1, sse.Event{ + Id: "123", + Data: "text", + }) + c.SSEvent("chat", H{ + "foo": "bar", + "bar": "foo", + }) + + assert.Equal(t, strings.Replace(w.Body.String(), " ", "", -1), strings.Replace("event:float\ndata:1.5\n\nid:123\ndata:text\n\nevent:chat\ndata:{\"bar\":\"foo\",\"foo\":\"bar\"}\n\n", " ", "", -1)) +} + +func TestContextRenderFile(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Request, _ = http.NewRequest("GET", "/", nil) + c.File("./gin.go") + + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "func New() *Engine {") + assert.Equal(t, "text/plain; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// TestContextRenderYAML tests that the response is serialized as YAML +// and Content-Type is set to application/x-yaml +func TestContextRenderYAML(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.YAML(201, H{"foo": "bar"}) + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "foo: bar\n", w.Body.String()) + assert.Equal(t, "application/x-yaml; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +func TestContextHeaders(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Header("Content-Type", "text/plain") + c.Header("X-Custom", "value") + + assert.Equal(t, "text/plain", c.Writer.Header().Get("Content-Type")) + assert.Equal(t, "value", c.Writer.Header().Get("X-Custom")) + + c.Header("Content-Type", "text/html") + c.Header("X-Custom", "") + + assert.Equal(t, "text/html", c.Writer.Header().Get("Content-Type")) + _, exist := c.Writer.Header()["X-Custom"] + assert.False(t, exist) +} + +// TODO +func TestContextRenderRedirectWithRelativePath(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Request, _ = http.NewRequest("POST", "http://example.com", nil) + assert.Panics(t, func() { c.Redirect(299, "/new_path") }) + assert.Panics(t, func() { c.Redirect(309, "/new_path") }) + + c.Redirect(301, "/path") + c.Writer.WriteHeaderNow() + assert.Equal(t, 301, w.Code) + assert.Equal(t, "/path", w.Header().Get("Location")) +} + +func TestContextRenderRedirectWithAbsolutePath(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Request, _ = http.NewRequest("POST", "http://example.com", nil) + 
c.Redirect(302, "http://google.com") + c.Writer.WriteHeaderNow() + + assert.Equal(t, 302, w.Code) + assert.Equal(t, "http://google.com", w.Header().Get("Location")) +} + +func TestContextRenderRedirectWith201(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Request, _ = http.NewRequest("POST", "http://example.com", nil) + c.Redirect(201, "/resource") + c.Writer.WriteHeaderNow() + + assert.Equal(t, 201, w.Code) + assert.Equal(t, "/resource", w.Header().Get("Location")) +} + +func TestContextRenderRedirectAll(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "http://example.com", nil) + assert.Panics(t, func() { c.Redirect(200, "/resource") }) + assert.Panics(t, func() { c.Redirect(202, "/resource") }) + assert.Panics(t, func() { c.Redirect(299, "/resource") }) + assert.Panics(t, func() { c.Redirect(309, "/resource") }) + assert.NotPanics(t, func() { c.Redirect(300, "/resource") }) + assert.NotPanics(t, func() { c.Redirect(308, "/resource") }) +} + +func TestContextNegotiationWithJSON(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + c.Request, _ = http.NewRequest("POST", "", nil) + + c.Negotiate(200, Negotiate{ + Offered: []string{MIMEJSON, MIMEXML}, + Data: H{"foo": "bar"}, + }) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "{\"foo\":\"bar\"}", w.Body.String()) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +func TestContextNegotiationWithXML(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + c.Request, _ = http.NewRequest("POST", "", nil) + + c.Negotiate(200, Negotiate{ + Offered: []string{MIMEXML, MIMEJSON}, + Data: H{"foo": "bar"}, + }) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "bar", w.Body.String()) + assert.Equal(t, "application/xml; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +func TestContextNegotiationWithHTML(t *testing.T) { + w := httptest.NewRecorder() + c, router := CreateTestContext(w) + c.Request, _ = http.NewRequest("POST", "", nil) + templ := template.Must(template.New("t").Parse(`Hello {{.name}}`)) + router.SetHTMLTemplate(templ) + + c.Negotiate(200, Negotiate{ + Offered: []string{MIMEHTML}, + Data: H{"name": "gin"}, + HTMLName: "t", + }) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "Hello gin", w.Body.String()) + assert.Equal(t, "text/html; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +func TestContextNegotiationNotSupport(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + c.Request, _ = http.NewRequest("POST", "", nil) + + c.Negotiate(200, Negotiate{ + Offered: []string{MIMEPOSTForm}, + }) + + assert.Equal(t, 406, w.Code) + assert.Equal(t, c.index, abortIndex) + assert.True(t, c.IsAborted()) +} + +func TestContextNegotiationFormat(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "", nil) + + assert.Panics(t, func() { c.NegotiateFormat() }) + assert.Equal(t, MIMEJSON, c.NegotiateFormat(MIMEJSON, MIMEXML)) + assert.Equal(t, MIMEHTML, c.NegotiateFormat(MIMEHTML, MIMEJSON)) +} + +func TestContextNegotiationFormatWithAccept(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "/", nil) + c.Request.Header.Add("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8") + + assert.Equal(t, MIMEXML, c.NegotiateFormat(MIMEJSON, MIMEXML)) + assert.Equal(t, MIMEHTML, c.NegotiateFormat(MIMEXML, 
MIMEHTML)) + assert.Empty(t, c.NegotiateFormat(MIMEJSON)) +} + +func TestContextNegotiationFormatCustum(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "/", nil) + c.Request.Header.Add("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8") + + c.Accepted = nil + c.SetAccepted(MIMEJSON, MIMEXML) + + assert.Equal(t, MIMEJSON, c.NegotiateFormat(MIMEJSON, MIMEXML)) + assert.Equal(t, MIMEXML, c.NegotiateFormat(MIMEXML, MIMEHTML)) + assert.Equal(t, MIMEJSON, c.NegotiateFormat(MIMEJSON)) +} + +func TestContextIsAborted(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + assert.False(t, c.IsAborted()) + + c.Abort() + assert.True(t, c.IsAborted()) + + c.Next() + assert.True(t, c.IsAborted()) + + c.index++ + assert.True(t, c.IsAborted()) +} + +// TestContextData tests that the response can be written from `bytesting` +// with specified MIME type +func TestContextAbortWithStatus(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.index = 4 + c.AbortWithStatus(401) + + assert.Equal(t, abortIndex, c.index) + assert.Equal(t, 401, c.Writer.Status()) + assert.Equal(t, 401, w.Code) + assert.True(t, c.IsAborted()) +} + +type testJSONAbortMsg struct { + Foo string `json:"foo"` + Bar string `json:"bar"` +} + +func TestContextAbortWithStatusJSON(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + c.index = 4 + + in := new(testJSONAbortMsg) + in.Bar = "barValue" + in.Foo = "fooValue" + + c.AbortWithStatusJSON(415, in) + + assert.Equal(t, abortIndex, c.index) + assert.Equal(t, 415, c.Writer.Status()) + assert.Equal(t, 415, w.Code) + assert.True(t, c.IsAborted()) + + contentType := w.Header().Get("Content-Type") + assert.Equal(t, "application/json; charset=utf-8", contentType) + + buf := new(bytes.Buffer) + buf.ReadFrom(w.Body) + jsonStringBody := buf.String() + assert.Equal(t, fmt.Sprint(`{"foo":"fooValue","bar":"barValue"}`), jsonStringBody) +} + +func TestContextError(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + assert.Empty(t, c.Errors) + + c.Error(errors.New("first error")) + assert.Len(t, c.Errors, 1) + assert.Equal(t, "Error #01: first error\n", c.Errors.String()) + + c.Error(&Error{ + Err: errors.New("second error"), + Meta: "some data 2", + Type: ErrorTypePublic, + }) + assert.Len(t, c.Errors, 2) + + assert.Equal(t, errors.New("first error"), c.Errors[0].Err) + assert.Nil(t, c.Errors[0].Meta) + assert.Equal(t, ErrorTypePrivate, c.Errors[0].Type) + + assert.Equal(t, errors.New("second error"), c.Errors[1].Err) + assert.Equal(t, "some data 2", c.Errors[1].Meta) + assert.Equal(t, ErrorTypePublic, c.Errors[1].Type) + + assert.Equal(t, c.Errors.Last(), c.Errors[1]) + + defer func() { + if recover() == nil { + t.Error("didn't panic") + } + }() + c.Error(nil) +} + +func TestContextTypedError(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Error(errors.New("externo 0")).SetType(ErrorTypePublic) + c.Error(errors.New("interno 0")).SetType(ErrorTypePrivate) + + for _, err := range c.Errors.ByType(ErrorTypePublic) { + assert.Equal(t, ErrorTypePublic, err.Type) + } + for _, err := range c.Errors.ByType(ErrorTypePrivate) { + assert.Equal(t, ErrorTypePrivate, err.Type) + } + assert.Equal(t, []string{"externo 0", "interno 0"}, c.Errors.Errors()) +} + +func TestContextAbortWithError(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.AbortWithError(401, errors.New("bad 
input")).SetMeta("some input") + + assert.Equal(t, 401, w.Code) + assert.Equal(t, abortIndex, c.index) + assert.True(t, c.IsAborted()) +} + +func TestContextClientIP(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "/", nil) + + c.Request.Header.Set("X-Real-IP", " 10.10.10.10 ") + c.Request.Header.Set("X-Forwarded-For", " 20.20.20.20, 30.30.30.30") + c.Request.Header.Set("X-Appengine-Remote-Addr", "50.50.50.50") + c.Request.RemoteAddr = " 40.40.40.40:42123 " + + assert.Equal(t, "20.20.20.20", c.ClientIP()) + + c.Request.Header.Del("X-Forwarded-For") + assert.Equal(t, "10.10.10.10", c.ClientIP()) + + c.Request.Header.Set("X-Forwarded-For", "30.30.30.30 ") + assert.Equal(t, "30.30.30.30", c.ClientIP()) + + c.Request.Header.Del("X-Forwarded-For") + c.Request.Header.Del("X-Real-IP") + c.engine.AppEngine = true + assert.Equal(t, "50.50.50.50", c.ClientIP()) + + c.Request.Header.Del("X-Appengine-Remote-Addr") + assert.Equal(t, "40.40.40.40", c.ClientIP()) + + // no port + c.Request.RemoteAddr = "50.50.50.50" + assert.Empty(t, c.ClientIP()) +} + +func TestContextContentType(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "/", nil) + c.Request.Header.Set("Content-Type", "application/json; charset=utf-8") + + assert.Equal(t, "application/json", c.ContentType()) +} + +func TestContextAutoBindJSON(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "/", bytes.NewBufferString("{\"foo\":\"bar\", \"bar\":\"foo\"}")) + c.Request.Header.Add("Content-Type", MIMEJSON) + + var obj struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + assert.NoError(t, c.Bind(&obj)) + assert.Equal(t, "foo", obj.Bar) + assert.Equal(t, "bar", obj.Foo) + assert.Empty(t, c.Errors) +} + +func TestContextBindWithJSON(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Request, _ = http.NewRequest("POST", "/", bytes.NewBufferString("{\"foo\":\"bar\", \"bar\":\"foo\"}")) + c.Request.Header.Add("Content-Type", MIMEXML) // set fake content-type + + var obj struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + assert.NoError(t, c.BindJSON(&obj)) + assert.Equal(t, "foo", obj.Bar) + assert.Equal(t, "bar", obj.Foo) + assert.Equal(t, 0, w.Body.Len()) +} + +func TestContextBindWithQuery(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Request, _ = http.NewRequest("POST", "/?foo=bar&bar=foo", bytes.NewBufferString("foo=unused")) + + var obj struct { + Foo string `form:"foo"` + Bar string `form:"bar"` + } + assert.NoError(t, c.BindQuery(&obj)) + assert.Equal(t, "foo", obj.Bar) + assert.Equal(t, "bar", obj.Foo) + assert.Equal(t, 0, w.Body.Len()) +} + +func TestContextBadAutoBind(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Request, _ = http.NewRequest("POST", "http://example.com", bytes.NewBufferString("\"foo\":\"bar\", \"bar\":\"foo\"}")) + c.Request.Header.Add("Content-Type", MIMEJSON) + var obj struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + assert.False(t, c.IsAborted()) + assert.Error(t, c.Bind(&obj)) + c.Writer.WriteHeaderNow() + + assert.Empty(t, obj.Bar) + assert.Empty(t, obj.Foo) + assert.Equal(t, 400, w.Code) + assert.True(t, c.IsAborted()) +} + +func TestContextAutoShouldBindJSON(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "/", 
bytes.NewBufferString("{\"foo\":\"bar\", \"bar\":\"foo\"}")) + c.Request.Header.Add("Content-Type", MIMEJSON) + + var obj struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + assert.NoError(t, c.ShouldBind(&obj)) + assert.Equal(t, "foo", obj.Bar) + assert.Equal(t, "bar", obj.Foo) + assert.Empty(t, c.Errors) +} + +func TestContextShouldBindWithJSON(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Request, _ = http.NewRequest("POST", "/", bytes.NewBufferString("{\"foo\":\"bar\", \"bar\":\"foo\"}")) + c.Request.Header.Add("Content-Type", MIMEXML) // set fake content-type + + var obj struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + assert.NoError(t, c.ShouldBindJSON(&obj)) + assert.Equal(t, "foo", obj.Bar) + assert.Equal(t, "bar", obj.Foo) + assert.Equal(t, 0, w.Body.Len()) +} + +func TestContextShouldBindWithQuery(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Request, _ = http.NewRequest("POST", "/?foo=bar&bar=foo", bytes.NewBufferString("foo=unused")) + + var obj struct { + Foo string `form:"foo"` + Bar string `form:"bar"` + } + assert.NoError(t, c.ShouldBindQuery(&obj)) + assert.Equal(t, "foo", obj.Bar) + assert.Equal(t, "bar", obj.Foo) + assert.Equal(t, 0, w.Body.Len()) +} + +func TestContextBadAutoShouldBind(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Request, _ = http.NewRequest("POST", "http://example.com", bytes.NewBufferString("\"foo\":\"bar\", \"bar\":\"foo\"}")) + c.Request.Header.Add("Content-Type", MIMEJSON) + var obj struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + assert.False(t, c.IsAborted()) + assert.Error(t, c.ShouldBind(&obj)) + + assert.Empty(t, obj.Bar) + assert.Empty(t, obj.Foo) + assert.False(t, c.IsAborted()) +} + +func TestContextGolangContext(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("POST", "/", bytes.NewBufferString("{\"foo\":\"bar\", \"bar\":\"foo\"}")) + assert.NoError(t, c.Err()) + assert.Nil(t, c.Done()) + ti, ok := c.Deadline() + assert.Equal(t, ti, time.Time{}) + assert.False(t, ok) + assert.Equal(t, c.Value(0), c.Request) + assert.Nil(t, c.Value("foo")) + + c.Set("foo", "bar") + assert.Equal(t, "bar", c.Value("foo")) + assert.Nil(t, c.Value(1)) +} + +func TestWebsocketsRequired(t *testing.T) { + // Example request from spec: https://tools.ietf.org/html/rfc6455#section-1.2 + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("GET", "/chat", nil) + c.Request.Header.Set("Host", "server.example.com") + c.Request.Header.Set("Upgrade", "websocket") + c.Request.Header.Set("Connection", "Upgrade") + c.Request.Header.Set("Sec-WebSocket-Key", "dGhlIHNhbXBsZSBub25jZQ==") + c.Request.Header.Set("Origin", "http://example.com") + c.Request.Header.Set("Sec-WebSocket-Protocol", "chat, superchat") + c.Request.Header.Set("Sec-WebSocket-Version", "13") + + assert.True(t, c.IsWebsocket()) + + // Normal request, no websocket required. 
+ c, _ = CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("GET", "/chat", nil) + c.Request.Header.Set("Host", "server.example.com") + + assert.False(t, c.IsWebsocket()) +} + +func TestGetRequestHeaderValue(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + c.Request, _ = http.NewRequest("GET", "/chat", nil) + c.Request.Header.Set("Gin-Version", "1.0.0") + + assert.Equal(t, "1.0.0", c.GetHeader("Gin-Version")) + assert.Empty(t, c.GetHeader("Connection")) +} + +func TestContextGetRawData(t *testing.T) { + c, _ := CreateTestContext(httptest.NewRecorder()) + body := bytes.NewBufferString("Fetch binary post data") + c.Request, _ = http.NewRequest("POST", "/", body) + c.Request.Header.Add("Content-Type", MIMEPOSTForm) + + data, err := c.GetRawData() + assert.Nil(t, err) + assert.Equal(t, "Fetch binary post data", string(data)) +} diff --git a/vendor/github.com/gin-gonic/gin/coverage.sh b/vendor/github.com/gin-gonic/gin/coverage.sh new file mode 100644 index 000000000..81437f918 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/coverage.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -e + +echo "mode: count" > coverage.out + +for d in $(go list ./... | grep -E 'gin$|binding$|render$'); do + go test -v -covermode=count -coverprofile=profile.out $d + if [ -f profile.out ]; then + cat profile.out | grep -v "mode:" >> coverage.out + rm profile.out + fi +done diff --git a/vendor/github.com/gin-gonic/gin/debug.go b/vendor/github.com/gin-gonic/gin/debug.go new file mode 100644 index 000000000..897c4943c --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/debug.go @@ -0,0 +1,77 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "bytes" + "html/template" + "log" +) + +func init() { + log.SetFlags(0) +} + +// IsDebugging returns true if the framework is running in debug mode. +// Use SetMode(gin.ReleaseMode) to disable debug mode. +func IsDebugging() bool { + return ginMode == debugCode +} + +func debugPrintRoute(httpMethod, absolutePath string, handlers HandlersChain) { + if IsDebugging() { + nuHandlers := len(handlers) + handlerName := nameOfFunction(handlers.Last()) + debugPrint("%-6s %-25s --> %s (%d handlers)\n", httpMethod, absolutePath, handlerName, nuHandlers) + } +} + +func debugPrintLoadTemplate(tmpl *template.Template) { + if IsDebugging() { + var buf bytes.Buffer + for _, tmpl := range tmpl.Templates() { + buf.WriteString("\t- ") + buf.WriteString(tmpl.Name()) + buf.WriteString("\n") + } + debugPrint("Loaded HTML Templates (%d): \n%s\n", len(tmpl.Templates()), buf.String()) + } +} + +func debugPrint(format string, values ...interface{}) { + if IsDebugging() { + log.Printf("[GIN-debug] "+format, values...) + } +} + +func debugPrintWARNINGDefault() { + debugPrint(`[WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached. + +`) +} + +func debugPrintWARNINGNew() { + debugPrint(`[WARNING] Running in "debug" mode. Switch to "release" mode in production. + - using env: export GIN_MODE=release + - using code: gin.SetMode(gin.ReleaseMode) + +`) +} + +func debugPrintWARNINGSetHTMLTemplate() { + debugPrint(`[WARNING] Since SetHTMLTemplate() is NOT thread-safe. It should only be called +at initialization. ie. 
before any route is registered or the router is listening in a socket: + + router := gin.Default() + router.SetHTMLTemplate(template) // << good place + +`) +} + +func debugPrintError(err error) { + if err != nil { + debugPrint("[ERROR] %v\n", err) + } +} diff --git a/vendor/github.com/gin-gonic/gin/debug_test.go b/vendor/github.com/gin-gonic/gin/debug_test.go new file mode 100644 index 000000000..dfd54c823 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/debug_test.go @@ -0,0 +1,115 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "bytes" + "errors" + "html/template" + "io" + "log" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +// TODO +// func debugRoute(httpMethod, absolutePath string, handlers HandlersChain) { +// func debugPrint(format string, values ...interface{}) { + +func TestIsDebugging(t *testing.T) { + SetMode(DebugMode) + assert.True(t, IsDebugging()) + SetMode(ReleaseMode) + assert.False(t, IsDebugging()) + SetMode(TestMode) + assert.False(t, IsDebugging()) +} + +func TestDebugPrint(t *testing.T) { + var w bytes.Buffer + setup(&w) + defer teardown() + + SetMode(ReleaseMode) + debugPrint("DEBUG this!") + SetMode(TestMode) + debugPrint("DEBUG this!") + assert.Empty(t, w.String()) + + SetMode(DebugMode) + debugPrint("these are %d %s\n", 2, "error messages") + assert.Equal(t, "[GIN-debug] these are 2 error messages\n", w.String()) +} + +func TestDebugPrintError(t *testing.T) { + var w bytes.Buffer + setup(&w) + defer teardown() + + SetMode(DebugMode) + debugPrintError(nil) + assert.Empty(t, w.String()) + + debugPrintError(errors.New("this is an error")) + assert.Equal(t, "[GIN-debug] [ERROR] this is an error\n", w.String()) +} + +func TestDebugPrintRoutes(t *testing.T) { + var w bytes.Buffer + setup(&w) + defer teardown() + + debugPrintRoute("GET", "/path/to/route/:param", HandlersChain{func(c *Context) {}, handlerNameTest}) + assert.Regexp(t, `^\[GIN-debug\] GET /path/to/route/:param --> (.*/vendor/)?github.com/gin-gonic/gin.handlerNameTest \(2 handlers\)\n$`, w.String()) +} + +func TestDebugPrintLoadTemplate(t *testing.T) { + var w bytes.Buffer + setup(&w) + defer teardown() + + templ := template.Must(template.New("").Delims("{[{", "}]}").ParseGlob("./fixtures/basic/hello.tmpl")) + debugPrintLoadTemplate(templ) + assert.Regexp(t, `^\[GIN-debug\] Loaded HTML Templates \(2\): \n(\t- \n|\t- hello\.tmpl\n){2}\n`, w.String()) +} + +func TestDebugPrintWARNINGSetHTMLTemplate(t *testing.T) { + var w bytes.Buffer + setup(&w) + defer teardown() + + debugPrintWARNINGSetHTMLTemplate() + assert.Equal(t, "[GIN-debug] [WARNING] Since SetHTMLTemplate() is NOT thread-safe. It should only be called\nat initialization. ie. before any route is registered or the router is listening in a socket:\n\n\trouter := gin.Default()\n\trouter.SetHTMLTemplate(template) // << good place\n\n", w.String()) +} + +func TestDebugPrintWARNINGDefault(t *testing.T) { + var w bytes.Buffer + setup(&w) + defer teardown() + + debugPrintWARNINGDefault() + assert.Equal(t, "[GIN-debug] [WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached.\n\n", w.String()) +} + +func TestDebugPrintWARNINGNew(t *testing.T) { + var w bytes.Buffer + setup(&w) + defer teardown() + + debugPrintWARNINGNew() + assert.Equal(t, "[GIN-debug] [WARNING] Running in \"debug\" mode. 
Switch to \"release\" mode in production.\n - using env:\texport GIN_MODE=release\n - using code:\tgin.SetMode(gin.ReleaseMode)\n\n", w.String()) +} + +func setup(w io.Writer) { + SetMode(DebugMode) + log.SetOutput(w) +} + +func teardown() { + SetMode(TestMode) + log.SetOutput(os.Stdout) +} diff --git a/vendor/github.com/gin-gonic/gin/deprecated.go b/vendor/github.com/gin-gonic/gin/deprecated.go new file mode 100644 index 000000000..ab4474296 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/deprecated.go @@ -0,0 +1,21 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "log" + + "github.com/gin-gonic/gin/binding" +) + +// BindWith binds the passed struct pointer using the specified binding engine. +// See the binding package. +func (c *Context) BindWith(obj interface{}, b binding.Binding) error { + log.Println(`BindWith(\"interface{}, binding.Binding\") error is going to + be deprecated, please check issue #662 and either use MustBindWith() if you + want HTTP 400 to be automatically returned if any error occur, or use + ShouldBindWith() if you need to manage the error.`) + return c.MustBindWith(obj, b) +} diff --git a/vendor/github.com/gin-gonic/gin/deprecated_test.go b/vendor/github.com/gin-gonic/gin/deprecated_test.go new file mode 100644 index 000000000..7a875fe49 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/deprecated_test.go @@ -0,0 +1,31 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "bytes" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gin-gonic/gin/binding" + "github.com/stretchr/testify/assert" +) + +func TestBindWith(t *testing.T) { + w := httptest.NewRecorder() + c, _ := CreateTestContext(w) + + c.Request, _ = http.NewRequest("POST", "/?foo=bar&bar=foo", bytes.NewBufferString("foo=unused")) + + var obj struct { + Foo string `form:"foo"` + Bar string `form:"bar"` + } + assert.NoError(t, c.BindWith(&obj, binding.Form)) + assert.Equal(t, "foo", obj.Bar) + assert.Equal(t, "bar", obj.Foo) + assert.Equal(t, 0, w.Body.Len()) +} diff --git a/vendor/github.com/gin-gonic/gin/doc.go b/vendor/github.com/gin-gonic/gin/doc.go new file mode 100644 index 000000000..01ac4a907 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/doc.go @@ -0,0 +1,6 @@ +/* +Package gin implements a HTTP web framework called gin. + +See https://gin-gonic.github.io/gin/ for more information about gin. +*/ +package gin // import "github.com/gin-gonic/gin" diff --git a/vendor/github.com/gin-gonic/gin/errors.go b/vendor/github.com/gin-gonic/gin/errors.go new file mode 100644 index 000000000..dbfccd856 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/errors.go @@ -0,0 +1,157 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package gin + +import ( + "bytes" + "fmt" + "reflect" + + "github.com/gin-gonic/gin/json" +) + +type ErrorType uint64 + +const ( + ErrorTypeBind ErrorType = 1 << 63 // used when c.Bind() fails + ErrorTypeRender ErrorType = 1 << 62 // used when c.Render() fails + ErrorTypePrivate ErrorType = 1 << 0 + ErrorTypePublic ErrorType = 1 << 1 + + ErrorTypeAny ErrorType = 1<<64 - 1 + ErrorTypeNu = 2 +) + +type Error struct { + Err error + Type ErrorType + Meta interface{} +} + +type errorMsgs []*Error + +var _ error = &Error{} + +func (msg *Error) SetType(flags ErrorType) *Error { + msg.Type = flags + return msg +} + +func (msg *Error) SetMeta(data interface{}) *Error { + msg.Meta = data + return msg +} + +func (msg *Error) JSON() interface{} { + json := H{} + if msg.Meta != nil { + value := reflect.ValueOf(msg.Meta) + switch value.Kind() { + case reflect.Struct: + return msg.Meta + case reflect.Map: + for _, key := range value.MapKeys() { + json[key.String()] = value.MapIndex(key).Interface() + } + default: + json["meta"] = msg.Meta + } + } + if _, ok := json["error"]; !ok { + json["error"] = msg.Error() + } + return json +} + +// MarshalJSON implements the json.Marshaller interface. +func (msg *Error) MarshalJSON() ([]byte, error) { + return json.Marshal(msg.JSON()) +} + +// Error implements the error interface +func (msg Error) Error() string { + return msg.Err.Error() +} + +func (msg *Error) IsType(flags ErrorType) bool { + return (msg.Type & flags) > 0 +} + +// ByType returns a readonly copy filtered the byte. +// ie ByType(gin.ErrorTypePublic) returns a slice of errors with type=ErrorTypePublic. +func (a errorMsgs) ByType(typ ErrorType) errorMsgs { + if len(a) == 0 { + return nil + } + if typ == ErrorTypeAny { + return a + } + var result errorMsgs + for _, msg := range a { + if msg.IsType(typ) { + result = append(result, msg) + } + } + return result +} + +// Last returns the last error in the slice. It returns nil if the array is empty. +// Shortcut for errors[len(errors)-1]. +func (a errorMsgs) Last() *Error { + if length := len(a); length > 0 { + return a[length-1] + } + return nil +} + +// Errors returns an array will all the error messages. +// Example: +// c.Error(errors.New("first")) +// c.Error(errors.New("second")) +// c.Error(errors.New("third")) +// c.Errors.Errors() // == []string{"first", "second", "third"} +func (a errorMsgs) Errors() []string { + if len(a) == 0 { + return nil + } + errorStrings := make([]string, len(a)) + for i, err := range a { + errorStrings[i] = err.Error() + } + return errorStrings +} + +func (a errorMsgs) JSON() interface{} { + switch len(a) { + case 0: + return nil + case 1: + return a.Last().JSON() + default: + json := make([]interface{}, len(a)) + for i, err := range a { + json[i] = err.JSON() + } + return json + } +} + +func (a errorMsgs) MarshalJSON() ([]byte, error) { + return json.Marshal(a.JSON()) +} + +func (a errorMsgs) String() string { + if len(a) == 0 { + return "" + } + var buffer bytes.Buffer + for i, msg := range a { + fmt.Fprintf(&buffer, "Error #%02d: %s\n", i+1, msg.Err) + if msg.Meta != nil { + fmt.Fprintf(&buffer, " Meta: %v\n", msg.Meta) + } + } + return buffer.String() +} diff --git a/vendor/github.com/gin-gonic/gin/errors_test.go b/vendor/github.com/gin-gonic/gin/errors_test.go new file mode 100644 index 000000000..a666d7c1b --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/errors_test.go @@ -0,0 +1,106 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. 
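As a usage note (an editorial sketch, not part of the vendored diff), the error helpers added in errors.go above are typically consumed from c.Errors inside a middleware after the handlers have run; the route path and error messages below are hypothetical.

```go
package main

import (
	"errors"
	"net/http"

	"github.com/gin-gonic/gin"
)

// errorReporter collects errors attached via c.Error(...) and, after the
// handlers finish, renders any public ones as a JSON body.
func errorReporter() gin.HandlerFunc {
	return func(c *gin.Context) {
		c.Next() // run the handlers first; they may call c.Error(...)

		// Only expose errors explicitly marked as public.
		if public := c.Errors.ByType(gin.ErrorTypePublic); len(public) > 0 {
			c.JSON(http.StatusInternalServerError, gin.H{"errors": public.JSON()})
		}
	}
}

func main() {
	r := gin.Default()
	r.Use(errorReporter())

	r.GET("/fail", func(c *gin.Context) {
		// Attach a public error; the middleware above renders it.
		c.Error(errors.New("something went wrong")).SetType(gin.ErrorTypePublic)
	})

	r.Run(":8080")
}
```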
+// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "errors" + "testing" + + "github.com/gin-gonic/gin/json" + "github.com/stretchr/testify/assert" +) + +func TestError(t *testing.T) { + baseError := errors.New("test error") + err := &Error{ + Err: baseError, + Type: ErrorTypePrivate, + } + assert.Equal(t, err.Error(), baseError.Error()) + assert.Equal(t, err.JSON(), H{"error": baseError.Error()}) + + assert.Equal(t, err.SetType(ErrorTypePublic), err) + assert.Equal(t, err.Type, ErrorTypePublic) + + assert.Equal(t, err.SetMeta("some data"), err) + assert.Equal(t, err.Meta, "some data") + assert.Equal(t, err.JSON(), H{ + "error": baseError.Error(), + "meta": "some data", + }) + + jsonBytes, _ := json.Marshal(err) + assert.Equal(t, "{\"error\":\"test error\",\"meta\":\"some data\"}", string(jsonBytes)) + + err.SetMeta(H{ + "status": "200", + "data": "some data", + }) + assert.Equal(t, err.JSON(), H{ + "error": baseError.Error(), + "status": "200", + "data": "some data", + }) + + err.SetMeta(H{ + "error": "custom error", + "status": "200", + "data": "some data", + }) + assert.Equal(t, err.JSON(), H{ + "error": "custom error", + "status": "200", + "data": "some data", + }) + + type customError struct { + status string + data string + } + err.SetMeta(customError{status: "200", data: "other data"}) + assert.Equal(t, customError{status: "200", data: "other data"}, err.JSON()) +} + +func TestErrorSlice(t *testing.T) { + errs := errorMsgs{ + {Err: errors.New("first"), Type: ErrorTypePrivate}, + {Err: errors.New("second"), Type: ErrorTypePrivate, Meta: "some data"}, + {Err: errors.New("third"), Type: ErrorTypePublic, Meta: H{"status": "400"}}, + } + + assert.Equal(t, errs, errs.ByType(ErrorTypeAny)) + assert.Equal(t, "third", errs.Last().Error()) + assert.Equal(t, []string{"first", "second", "third"}, errs.Errors()) + assert.Equal(t, []string{"third"}, errs.ByType(ErrorTypePublic).Errors()) + assert.Equal(t, []string{"first", "second"}, errs.ByType(ErrorTypePrivate).Errors()) + assert.Equal(t, []string{"first", "second", "third"}, errs.ByType(ErrorTypePublic|ErrorTypePrivate).Errors()) + assert.Empty(t, errs.ByType(ErrorTypeBind)) + assert.Empty(t, errs.ByType(ErrorTypeBind).String()) + + assert.Equal(t, `Error #01: first +Error #02: second + Meta: some data +Error #03: third + Meta: map[status:400] +`, errs.String()) + assert.Equal(t, []interface{}{ + H{"error": "first"}, + H{"error": "second", "meta": "some data"}, + H{"error": "third", "status": "400"}, + }, errs.JSON()) + jsonBytes, _ := json.Marshal(errs) + assert.Equal(t, "[{\"error\":\"first\"},{\"error\":\"second\",\"meta\":\"some data\"},{\"error\":\"third\",\"status\":\"400\"}]", string(jsonBytes)) + errs = errorMsgs{ + {Err: errors.New("first"), Type: ErrorTypePrivate}, + } + assert.Equal(t, H{"error": "first"}, errs.JSON()) + jsonBytes, _ = json.Marshal(errs) + assert.Equal(t, "{\"error\":\"first\"}", string(jsonBytes)) + + errs = errorMsgs{} + assert.Nil(t, errs.Last()) + assert.Nil(t, errs.JSON()) + assert.Empty(t, errs.String()) +} diff --git a/vendor/github.com/gin-gonic/gin/examples/app-engine/README.md b/vendor/github.com/gin-gonic/gin/examples/app-engine/README.md new file mode 100644 index 000000000..48505de83 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/app-engine/README.md @@ -0,0 +1,7 @@ +# Guide to run Gin under App Engine LOCAL Development Server + +1. Download, install and setup Go in your computer. 
(That includes setting your `$GOPATH`.) +2. Download SDK for your platform from here: `https://developers.google.com/appengine/downloads?hl=es#Google_App_Engine_SDK_for_Go` +3. Download Gin source code using: `$ go get github.com/gin-gonic/gin` +4. Navigate to examples folder: `$ cd $GOPATH/src/github.com/gin-gonic/gin/examples/` +5. Run it: `$ goapp serve app-engine/` \ No newline at end of file diff --git a/vendor/github.com/gin-gonic/gin/examples/app-engine/app.yaml b/vendor/github.com/gin-gonic/gin/examples/app-engine/app.yaml new file mode 100644 index 000000000..5f20cf3f2 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/app-engine/app.yaml @@ -0,0 +1,8 @@ +application: hello +version: 1 +runtime: go +api_version: go1 + +handlers: +- url: /.* + script: _go_app \ No newline at end of file diff --git a/vendor/github.com/gin-gonic/gin/examples/app-engine/hello.go b/vendor/github.com/gin-gonic/gin/examples/app-engine/hello.go new file mode 100644 index 000000000..f569dadb6 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/app-engine/hello.go @@ -0,0 +1,24 @@ +package hello + +import ( + "net/http" + + "github.com/gin-gonic/gin" +) + +// This function's name is a must. App Engine uses it to drive the requests properly. +func init() { + // Starts a new Gin instance with no middle-ware + r := gin.New() + + // Define your handlers + r.GET("/", func(c *gin.Context) { + c.String(http.StatusOK, "Hello World!") + }) + r.GET("/ping", func(c *gin.Context) { + c.String(http.StatusOK, "pong") + }) + + // Handle all requests using net/http + http.Handle("/", r) +} diff --git a/vendor/github.com/gin-gonic/gin/examples/auto-tls/example1/main.go b/vendor/github.com/gin-gonic/gin/examples/auto-tls/example1/main.go new file mode 100644 index 000000000..fa9f40088 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/auto-tls/example1/main.go @@ -0,0 +1,19 @@ +package main + +import ( + "log" + + "github.com/gin-gonic/autotls" + "github.com/gin-gonic/gin" +) + +func main() { + r := gin.Default() + + // Ping handler + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + log.Fatal(autotls.Run(r, "example1.com", "example2.com")) +} diff --git a/vendor/github.com/gin-gonic/gin/examples/auto-tls/example2/main.go b/vendor/github.com/gin-gonic/gin/examples/auto-tls/example2/main.go new file mode 100644 index 000000000..017186892 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/auto-tls/example2/main.go @@ -0,0 +1,26 @@ +package main + +import ( + "log" + + "github.com/gin-gonic/autotls" + "github.com/gin-gonic/gin" + "golang.org/x/crypto/acme/autocert" +) + +func main() { + r := gin.Default() + + // Ping handler + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + m := autocert.Manager{ + Prompt: autocert.AcceptTOS, + HostPolicy: autocert.HostWhitelist("example1.com", "example2.com"), + Cache: autocert.DirCache("/var/www/.cache"), + } + + log.Fatal(autotls.RunWithManager(r, &m)) +} diff --git a/vendor/github.com/gin-gonic/gin/examples/basic/main.go b/vendor/github.com/gin-gonic/gin/examples/basic/main.go new file mode 100644 index 000000000..473c6a09d --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/basic/main.go @@ -0,0 +1,63 @@ +package main + +import ( + "github.com/gin-gonic/gin" +) + +var DB = make(map[string]string) + +func setupRouter() *gin.Engine { + // Disable Console Color + // gin.DisableConsoleColor() + r := gin.Default() + + // Ping test + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + 
// Get user value + r.GET("/user/:name", func(c *gin.Context) { + user := c.Params.ByName("name") + value, ok := DB[user] + if ok { + c.JSON(200, gin.H{"user": user, "value": value}) + } else { + c.JSON(200, gin.H{"user": user, "status": "no value"}) + } + }) + + // Authorized group (uses gin.BasicAuth() middleware) + // Same than: + // authorized := r.Group("/") + // authorized.Use(gin.BasicAuth(gin.Credentials{ + // "foo": "bar", + // "manu": "123", + //})) + authorized := r.Group("/", gin.BasicAuth(gin.Accounts{ + "foo": "bar", // user:foo password:bar + "manu": "123", // user:manu password:123 + })) + + authorized.POST("admin", func(c *gin.Context) { + user := c.MustGet(gin.AuthUserKey).(string) + + // Parse JSON + var json struct { + Value string `json:"value" binding:"required"` + } + + if c.Bind(&json) == nil { + DB[user] = json.Value + c.JSON(200, gin.H{"status": "ok"}) + } + }) + + return r +} + +func main() { + r := setupRouter() + // Listen and Server in 0.0.0.0:8080 + r.Run(":8080") +} diff --git a/vendor/github.com/gin-gonic/gin/examples/basic/main_test.go b/vendor/github.com/gin-gonic/gin/examples/basic/main_test.go new file mode 100644 index 000000000..61203d66f --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/basic/main_test.go @@ -0,0 +1,20 @@ +package main + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPingRoute(t *testing.T) { + router := setupRouter() + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/ping", nil) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "pong", w.Body.String()) +} diff --git a/vendor/github.com/gin-gonic/gin/examples/custom-validation/server.go b/vendor/github.com/gin-gonic/gin/examples/custom-validation/server.go new file mode 100644 index 000000000..31d449f0b --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/custom-validation/server.go @@ -0,0 +1,45 @@ +package main + +import ( + "net/http" + "reflect" + "time" + + "github.com/gin-gonic/gin" + "github.com/gin-gonic/gin/binding" + "gopkg.in/go-playground/validator.v8" +) + +type Booking struct { + CheckIn time.Time `form:"check_in" binding:"required,bookabledate" time_format:"2006-01-02"` + CheckOut time.Time `form:"check_out" binding:"required,gtfield=CheckIn" time_format:"2006-01-02"` +} + +func bookableDate( + v *validator.Validate, topStruct reflect.Value, currentStructOrField reflect.Value, + field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string, +) bool { + if date, ok := field.Interface().(time.Time); ok { + today := time.Now() + if today.Year() > date.Year() || today.YearDay() > date.YearDay() { + return false + } + } + return true +} + +func main() { + route := gin.Default() + binding.Validator.RegisterValidation("bookabledate", bookableDate) + route.GET("/bookable", getBookable) + route.Run(":8085") +} + +func getBookable(c *gin.Context) { + var b Booking + if err := c.ShouldBindWith(&b, binding.Query); err == nil { + c.JSON(http.StatusOK, gin.H{"message": "Booking dates are valid!"}) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} diff --git a/vendor/github.com/gin-gonic/gin/examples/favicon/favicon.ico b/vendor/github.com/gin-gonic/gin/examples/favicon/favicon.ico new file mode 100644 index 000000000..3959cd7f9 Binary files /dev/null and b/vendor/github.com/gin-gonic/gin/examples/favicon/favicon.ico differ diff --git a/vendor/github.com/gin-gonic/gin/examples/favicon/main.go 
b/vendor/github.com/gin-gonic/gin/examples/favicon/main.go new file mode 100644 index 000000000..5ad39331d --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/favicon/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "github.com/gin-gonic/gin" + "github.com/thinkerou/favicon" +) + +func main() { + app := gin.Default() + app.Use(favicon.New("./favicon.ico")) + app.GET("/ping", func(c *gin.Context) { + c.String(200, "Hello favicon.") + }) + app.Run(":8080") +} diff --git a/vendor/github.com/gin-gonic/gin/examples/graceful-shutdown/close/server.go b/vendor/github.com/gin-gonic/gin/examples/graceful-shutdown/close/server.go new file mode 100644 index 000000000..9c4e90fac --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/graceful-shutdown/close/server.go @@ -0,0 +1,45 @@ +// +build go1.8 + +package main + +import ( + "log" + "net/http" + "os" + "os/signal" + + "github.com/gin-gonic/gin" +) + +func main() { + router := gin.Default() + router.GET("/", func(c *gin.Context) { + c.String(http.StatusOK, "Welcome Gin Server") + }) + + server := &http.Server{ + Addr: ":8080", + Handler: router, + } + + quit := make(chan os.Signal) + signal.Notify(quit, os.Interrupt) + + go func() { + <-quit + log.Println("receive interrupt signal") + if err := server.Close(); err != nil { + log.Fatal("Server Close:", err) + } + }() + + if err := server.ListenAndServe(); err != nil { + if err == http.ErrServerClosed { + log.Println("Server closed under request") + } else { + log.Fatal("Server closed unexpect") + } + } + + log.Println("Server exiting") +} diff --git a/vendor/github.com/gin-gonic/gin/examples/graceful-shutdown/graceful-shutdown/server.go b/vendor/github.com/gin-gonic/gin/examples/graceful-shutdown/graceful-shutdown/server.go new file mode 100644 index 000000000..6debe7f59 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/graceful-shutdown/graceful-shutdown/server.go @@ -0,0 +1,48 @@ +// +build go1.8 + +package main + +import ( + "context" + "log" + "net/http" + "os" + "os/signal" + "time" + + "github.com/gin-gonic/gin" +) + +func main() { + router := gin.Default() + router.GET("/", func(c *gin.Context) { + time.Sleep(5 * time.Second) + c.String(http.StatusOK, "Welcome Gin Server") + }) + + srv := &http.Server{ + Addr: ":8080", + Handler: router, + } + + go func() { + // service connections + if err := srv.ListenAndServe(); err != nil { + log.Printf("listen: %s\n", err) + } + }() + + // Wait for interrupt signal to gracefully shutdown the server with + // a timeout of 5 seconds. + quit := make(chan os.Signal) + signal.Notify(quit, os.Interrupt) + <-quit + log.Println("Shutdown Server ...") + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := srv.Shutdown(ctx); err != nil { + log.Fatal("Server Shutdown:", err) + } + log.Println("Server exiting") +} diff --git a/vendor/github.com/gin-gonic/gin/examples/http2/README.md b/vendor/github.com/gin-gonic/gin/examples/http2/README.md new file mode 100644 index 000000000..42dd4b8ac --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/http2/README.md @@ -0,0 +1,18 @@ +## How to generate RSA private key and digital certificate + +1. Install Openssl + +Please visit https://github.com/openssl/openssl to get pkg and install. + +2. Generate RSA private key + +```sh +$ mkdir testdata +$ openssl genrsa -out ./testdata/server.key 2048 +``` + +3. 
Generate digital certificate + +```sh +$ openssl req -new -x509 -key ./testdata/server.key -out ./testdata/server.pem -days 365 +``` diff --git a/vendor/github.com/gin-gonic/gin/examples/http2/main.go b/vendor/github.com/gin-gonic/gin/examples/http2/main.go new file mode 100644 index 000000000..07df01e2b --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/http2/main.go @@ -0,0 +1,37 @@ +package main + +import ( + "html/template" + "log" + "os" + + "github.com/gin-gonic/gin" +) + +var html = template.Must(template.New("https").Parse(` + + + Https Test + + +

Welcome, Ginner!

+ + +`)) + +func main() { + logger := log.New(os.Stderr, "", 0) + logger.Println("[WARNING] DON'T USE THE EMBED CERTS FROM THIS EXAMPLE IN PRODUCTION ENVIRONMENT, GENERATE YOUR OWN!") + + r := gin.Default() + r.SetHTMLTemplate(html) + + r.GET("/welcome", func(c *gin.Context) { + c.HTML(200, "https", gin.H{ + "status": "success", + }) + }) + + // Listen and Server in https://127.0.0.1:8080 + r.RunTLS(":8080", "./testdata/server.pem", "./testdata/server.key") +} diff --git a/vendor/github.com/gin-gonic/gin/examples/http2/testdata/ca.pem b/vendor/github.com/gin-gonic/gin/examples/http2/testdata/ca.pem new file mode 100644 index 000000000..6c8511a73 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/http2/testdata/ca.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla +Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 +YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT +BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 ++L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu +g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd +Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau +sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m +oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG +Dfcog5wrJytaQ6UA0wE= +-----END CERTIFICATE----- diff --git a/vendor/github.com/gin-gonic/gin/examples/http2/testdata/server.key b/vendor/github.com/gin-gonic/gin/examples/http2/testdata/server.key new file mode 100644 index 000000000..143a5b876 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/http2/testdata/server.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD +M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf +3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY +AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm +V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY +tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p +dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q +K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR +81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff +DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd +aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 +ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 +XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe +F98XJ7tIFfJq +-----END PRIVATE KEY----- diff --git a/vendor/github.com/gin-gonic/gin/examples/http2/testdata/server.pem b/vendor/github.com/gin-gonic/gin/examples/http2/testdata/server.pem new file mode 100644 index 000000000..f3d43fcc5 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/http2/testdata/server.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx +MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 
+ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco +LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg +zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd +9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw +CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy +em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G +CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 +hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh +y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 +-----END CERTIFICATE----- diff --git a/vendor/github.com/gin-gonic/gin/examples/multiple-service/main.go b/vendor/github.com/gin-gonic/gin/examples/multiple-service/main.go new file mode 100644 index 000000000..ceddaa2ea --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/multiple-service/main.go @@ -0,0 +1,74 @@ +package main + +import ( + "log" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "golang.org/x/sync/errgroup" +) + +var ( + g errgroup.Group +) + +func router01() http.Handler { + e := gin.New() + e.Use(gin.Recovery()) + e.GET("/", func(c *gin.Context) { + c.JSON( + http.StatusOK, + gin.H{ + "code": http.StatusOK, + "error": "Welcome server 01", + }, + ) + }) + + return e +} + +func router02() http.Handler { + e := gin.New() + e.Use(gin.Recovery()) + e.GET("/", func(c *gin.Context) { + c.JSON( + http.StatusOK, + gin.H{ + "code": http.StatusOK, + "error": "Welcome server 02", + }, + ) + }) + + return e +} + +func main() { + server01 := &http.Server{ + Addr: ":8080", + Handler: router01(), + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + } + + server02 := &http.Server{ + Addr: ":8081", + Handler: router02(), + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + } + + g.Go(func() error { + return server01.ListenAndServe() + }) + + g.Go(func() error { + return server02.ListenAndServe() + }) + + if err := g.Wait(); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/Makefile b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/Makefile new file mode 100644 index 000000000..104ce809d --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/Makefile @@ -0,0 +1,10 @@ +all: deps build + +.PHONY: deps +deps: + go get -d -v github.com/dustin/go-broadcast/... + go get -d -v github.com/manucorporat/stats/... 
+ +.PHONY: build +build: deps + go build -o realtime-advanced main.go rooms.go routes.go stats.go diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/main.go b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/main.go new file mode 100644 index 000000000..1f3c8585f --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/main.go @@ -0,0 +1,39 @@ +package main + +import ( + "fmt" + "runtime" + + "github.com/gin-gonic/gin" +) + +func main() { + ConfigRuntime() + StartWorkers() + StartGin() +} + +func ConfigRuntime() { + nuCPU := runtime.NumCPU() + runtime.GOMAXPROCS(nuCPU) + fmt.Printf("Running with %d CPUs\n", nuCPU) +} + +func StartWorkers() { + go statsWorker() +} + +func StartGin() { + gin.SetMode(gin.ReleaseMode) + + router := gin.New() + router.Use(rateLimit, gin.Recovery()) + router.LoadHTMLGlob("resources/*.templ.html") + router.Static("/static", "resources/static") + router.GET("/", index) + router.GET("/room/:roomid", roomGET) + router.POST("/room-post/:roomid", roomPOST) + router.GET("/stream/:roomid", streamRoom) + + router.Run(":80") +} diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/room_login.templ.html b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/room_login.templ.html new file mode 100644 index 000000000..27dac3879 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/room_login.templ.html @@ -0,0 +1,208 @@ + + + + + + + Server-Sent Events. Room "{{.roomid}}" + + + + + + + + + + + + + + + + + + + + + + + +
+
+

Server-Sent Events in Go

+

Server-sent events (SSE) is a technology where a browser receives automatic updates from a server over an HTTP connection. It is not WebSockets. Learn more.

+

The chat and the charts data are provided in real time using the SSE implementation of the Gin Framework.

+
+
+
+ + + + + + + + +
NickMessage
+
+ {{if .nick}} +
+
+ +
+
{{.nick}}
+ +
+
+ +
+ {{else}} +
+ Join the SSE real-time chat +
+ +
+
+ +
+
+ {{end}} +
+
+
+

+ ◼︎ Users
+ ◼︎ Inbound messages / sec
+ ◼︎ Outbound messages / sec
+

+
+
+
+
+
+
+

Realtime server Go stats

+
+

Memory usage

+

+

+

+

+ ◼︎ Heap bytes
+ ◼︎ Stack bytes
+

+
+
+

Allocations per second

+

+

+

+

+ ◼︎ Mallocs / sec
+ ◼︎ Frees / sec
+

+
+
+
+

MIT Open Sourced

+ +
+ +

Server-side (Go)

+
func streamRoom(c *gin.Context) {
+    roomid := c.ParamValue("roomid")
+    listener := openListener(roomid)
+    statsTicker := time.NewTicker(1 * time.Second)
+    defer closeListener(roomid, listener)
+    defer statsTicker.Stop()
+
+    c.Stream(func(w io.Writer) bool {
+        select {
+        case msg := <-listener:
+            c.SSEvent("message", msg)
+        case <-statsTicker.C:
+            c.SSEvent("stats", Stats())
+        }
+        return true
+    })
+}
+
+
+

Client-side (JS)

+
function StartSSE(roomid) {
+    var source = new EventSource('/stream/'+roomid);
+    source.addEventListener('message', newChatMessage, false);
+    source.addEventListener('stats', stats, false);
+}
+
+
+
+
+

SSE package

+
import "github.com/manucorporat/sse"
+
+func httpHandler(w http.ResponseWriter, req *http.Request) {
+    // data can be a primitive like a string, an integer or a float
+    sse.Encode(w, sse.Event{
+        Event: "message",
+        Data:  "some data\nmore data",
+    })
+
+    // also a complex type, like a map, a struct or a slice
+    sse.Encode(w, sse.Event{
+        Id:    "124",
+        Event: "message",
+        Data: map[string]interface{}{
+            "user":    "manu",
+            "date":    time.Now().Unix(),
+            "content": "hi!",
+        },
+    })
+}
+
event: message
+data: some data\\nmore data
+
+id: 124
+event: message
+data: {"content":"hi!","date":1431540810,"user":"manu"}
+
+
+
+ +
+ + diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/static/epoch.min.css b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/static/epoch.min.css new file mode 100644 index 000000000..47a80cdc2 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/static/epoch.min.css @@ -0,0 +1 @@ +.epoch .axis path,.epoch .axis line{shape-rendering:crispEdges;}.epoch .axis.canvas .tick line{shape-rendering:geometricPrecision;}div#_canvas_css_reference{width:0;height:0;position:absolute;top:-1000px;left:-1000px;}div#_canvas_css_reference svg{position:absolute;width:0;height:0;top:-1000px;left:-1000px;}.epoch{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:12pt;}.epoch .axis path,.epoch .axis line{fill:none;stroke:#000;}.epoch .axis .tick text{font-size:9pt;}.epoch .line{fill:none;stroke-width:2px;}.epoch.sparklines .line{stroke-width:1px;}.epoch .area{stroke:none;}.epoch .arc.pie{stroke:#fff;stroke-width:1.5px;}.epoch .arc.pie text{stroke:none;fill:white;font-size:9pt;}.epoch .gauge-labels .value{text-anchor:middle;font-size:140%;fill:#666;}.epoch.gauge-tiny{width:120px;height:90px;}.epoch.gauge-tiny .gauge-labels .value{font-size:80%;}.epoch.gauge-tiny .gauge .arc.outer{stroke-width:2px;}.epoch.gauge-small{width:180px;height:135px;}.epoch.gauge-small .gauge-labels .value{font-size:120%;}.epoch.gauge-small .gauge .arc.outer{stroke-width:3px;}.epoch.gauge-medium{width:240px;height:180px;}.epoch.gauge-medium .gauge .arc.outer{stroke-width:3px;}.epoch.gauge-large{width:320px;height:240px;}.epoch.gauge-large .gauge-labels .value{font-size:180%;}.epoch .gauge .arc.outer{stroke-width:4px;stroke:#666;}.epoch .gauge .arc.inner{stroke-width:1px;stroke:#555;}.epoch .gauge .tick{stroke-width:1px;stroke:#555;}.epoch .gauge .needle{fill:orange;}.epoch .gauge .needle-base{fill:#666;}.epoch div.ref.category1,.epoch.category10 div.ref.category1{background-color:#1f77b4;}.epoch .category1 .line,.epoch.category10 .category1 .line{stroke:#1f77b4;}.epoch .category1 .area,.epoch .category1 .dot,.epoch.category10 .category1 .area,.epoch.category10 .category1 .dot{fill:#1f77b4;stroke:rgba(0, 0, 0, 0);}.epoch .arc.category1 path,.epoch.category10 .arc.category1 path{fill:#1f77b4;}.epoch .bar.category1,.epoch.category10 .bar.category1{fill:#1f77b4;}.epoch div.ref.category2,.epoch.category10 div.ref.category2{background-color:#ff7f0e;}.epoch .category2 .line,.epoch.category10 .category2 .line{stroke:#ff7f0e;}.epoch .category2 .area,.epoch .category2 .dot,.epoch.category10 .category2 .area,.epoch.category10 .category2 .dot{fill:#ff7f0e;stroke:rgba(0, 0, 0, 0);}.epoch .arc.category2 path,.epoch.category10 .arc.category2 path{fill:#ff7f0e;}.epoch .bar.category2,.epoch.category10 .bar.category2{fill:#ff7f0e;}.epoch div.ref.category3,.epoch.category10 div.ref.category3{background-color:#2ca02c;}.epoch .category3 .line,.epoch.category10 .category3 .line{stroke:#2ca02c;}.epoch .category3 .area,.epoch .category3 .dot,.epoch.category10 .category3 .area,.epoch.category10 .category3 .dot{fill:#2ca02c;stroke:rgba(0, 0, 0, 0);}.epoch .arc.category3 path,.epoch.category10 .arc.category3 path{fill:#2ca02c;}.epoch .bar.category3,.epoch.category10 .bar.category3{fill:#2ca02c;}.epoch div.ref.category4,.epoch.category10 div.ref.category4{background-color:#d62728;}.epoch .category4 .line,.epoch.category10 .category4 .line{stroke:#d62728;}.epoch .category4 .area,.epoch .category4 .dot,.epoch.category10 .category4 .area,.epoch.category10 .category4 
.dot{fill:#d62728;stroke:rgba(0, 0, 0, 0);}.epoch .arc.category4 path,.epoch.category10 .arc.category4 path{fill:#d62728;}.epoch .bar.category4,.epoch.category10 .bar.category4{fill:#d62728;}.epoch div.ref.category5,.epoch.category10 div.ref.category5{background-color:#9467bd;}.epoch .category5 .line,.epoch.category10 .category5 .line{stroke:#9467bd;}.epoch .category5 .area,.epoch .category5 .dot,.epoch.category10 .category5 .area,.epoch.category10 .category5 .dot{fill:#9467bd;stroke:rgba(0, 0, 0, 0);}.epoch .arc.category5 path,.epoch.category10 .arc.category5 path{fill:#9467bd;}.epoch .bar.category5,.epoch.category10 .bar.category5{fill:#9467bd;}.epoch div.ref.category6,.epoch.category10 div.ref.category6{background-color:#8c564b;}.epoch .category6 .line,.epoch.category10 .category6 .line{stroke:#8c564b;}.epoch .category6 .area,.epoch .category6 .dot,.epoch.category10 .category6 .area,.epoch.category10 .category6 .dot{fill:#8c564b;stroke:rgba(0, 0, 0, 0);}.epoch .arc.category6 path,.epoch.category10 .arc.category6 path{fill:#8c564b;}.epoch .bar.category6,.epoch.category10 .bar.category6{fill:#8c564b;}.epoch div.ref.category7,.epoch.category10 div.ref.category7{background-color:#e377c2;}.epoch .category7 .line,.epoch.category10 .category7 .line{stroke:#e377c2;}.epoch .category7 .area,.epoch .category7 .dot,.epoch.category10 .category7 .area,.epoch.category10 .category7 .dot{fill:#e377c2;stroke:rgba(0, 0, 0, 0);}.epoch .arc.category7 path,.epoch.category10 .arc.category7 path{fill:#e377c2;}.epoch .bar.category7,.epoch.category10 .bar.category7{fill:#e377c2;}.epoch div.ref.category8,.epoch.category10 div.ref.category8{background-color:#7f7f7f;}.epoch .category8 .line,.epoch.category10 .category8 .line{stroke:#7f7f7f;}.epoch .category8 .area,.epoch .category8 .dot,.epoch.category10 .category8 .area,.epoch.category10 .category8 .dot{fill:#7f7f7f;stroke:rgba(0, 0, 0, 0);}.epoch .arc.category8 path,.epoch.category10 .arc.category8 path{fill:#7f7f7f;}.epoch .bar.category8,.epoch.category10 .bar.category8{fill:#7f7f7f;}.epoch div.ref.category9,.epoch.category10 div.ref.category9{background-color:#bcbd22;}.epoch .category9 .line,.epoch.category10 .category9 .line{stroke:#bcbd22;}.epoch .category9 .area,.epoch .category9 .dot,.epoch.category10 .category9 .area,.epoch.category10 .category9 .dot{fill:#bcbd22;stroke:rgba(0, 0, 0, 0);}.epoch .arc.category9 path,.epoch.category10 .arc.category9 path{fill:#bcbd22;}.epoch .bar.category9,.epoch.category10 .bar.category9{fill:#bcbd22;}.epoch div.ref.category10,.epoch.category10 div.ref.category10{background-color:#17becf;}.epoch .category10 .line,.epoch.category10 .category10 .line{stroke:#17becf;}.epoch .category10 .area,.epoch .category10 .dot,.epoch.category10 .category10 .area,.epoch.category10 .category10 .dot{fill:#17becf;stroke:rgba(0, 0, 0, 0);}.epoch .arc.category10 path,.epoch.category10 .arc.category10 path{fill:#17becf;}.epoch .bar.category10,.epoch.category10 .bar.category10{fill:#17becf;}.epoch.category20 div.ref.category1{background-color:#1f77b4;}.epoch.category20 .category1 .line{stroke:#1f77b4;}.epoch.category20 .category1 .area,.epoch.category20 .category1 .dot{fill:#1f77b4;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category1 path{fill:#1f77b4;}.epoch.category20 .bar.category1{fill:#1f77b4;}.epoch.category20 div.ref.category2{background-color:#aec7e8;}.epoch.category20 .category2 .line{stroke:#aec7e8;}.epoch.category20 .category2 .area,.epoch.category20 .category2 .dot{fill:#aec7e8;stroke:rgba(0, 0, 0, 0);}.epoch.category20 
.arc.category2 path{fill:#aec7e8;}.epoch.category20 .bar.category2{fill:#aec7e8;}.epoch.category20 div.ref.category3{background-color:#ff7f0e;}.epoch.category20 .category3 .line{stroke:#ff7f0e;}.epoch.category20 .category3 .area,.epoch.category20 .category3 .dot{fill:#ff7f0e;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category3 path{fill:#ff7f0e;}.epoch.category20 .bar.category3{fill:#ff7f0e;}.epoch.category20 div.ref.category4{background-color:#ffbb78;}.epoch.category20 .category4 .line{stroke:#ffbb78;}.epoch.category20 .category4 .area,.epoch.category20 .category4 .dot{fill:#ffbb78;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category4 path{fill:#ffbb78;}.epoch.category20 .bar.category4{fill:#ffbb78;}.epoch.category20 div.ref.category5{background-color:#2ca02c;}.epoch.category20 .category5 .line{stroke:#2ca02c;}.epoch.category20 .category5 .area,.epoch.category20 .category5 .dot{fill:#2ca02c;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category5 path{fill:#2ca02c;}.epoch.category20 .bar.category5{fill:#2ca02c;}.epoch.category20 div.ref.category6{background-color:#98df8a;}.epoch.category20 .category6 .line{stroke:#98df8a;}.epoch.category20 .category6 .area,.epoch.category20 .category6 .dot{fill:#98df8a;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category6 path{fill:#98df8a;}.epoch.category20 .bar.category6{fill:#98df8a;}.epoch.category20 div.ref.category7{background-color:#d62728;}.epoch.category20 .category7 .line{stroke:#d62728;}.epoch.category20 .category7 .area,.epoch.category20 .category7 .dot{fill:#d62728;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category7 path{fill:#d62728;}.epoch.category20 .bar.category7{fill:#d62728;}.epoch.category20 div.ref.category8{background-color:#ff9896;}.epoch.category20 .category8 .line{stroke:#ff9896;}.epoch.category20 .category8 .area,.epoch.category20 .category8 .dot{fill:#ff9896;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category8 path{fill:#ff9896;}.epoch.category20 .bar.category8{fill:#ff9896;}.epoch.category20 div.ref.category9{background-color:#9467bd;}.epoch.category20 .category9 .line{stroke:#9467bd;}.epoch.category20 .category9 .area,.epoch.category20 .category9 .dot{fill:#9467bd;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category9 path{fill:#9467bd;}.epoch.category20 .bar.category9{fill:#9467bd;}.epoch.category20 div.ref.category10{background-color:#c5b0d5;}.epoch.category20 .category10 .line{stroke:#c5b0d5;}.epoch.category20 .category10 .area,.epoch.category20 .category10 .dot{fill:#c5b0d5;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category10 path{fill:#c5b0d5;}.epoch.category20 .bar.category10{fill:#c5b0d5;}.epoch.category20 div.ref.category11{background-color:#8c564b;}.epoch.category20 .category11 .line{stroke:#8c564b;}.epoch.category20 .category11 .area,.epoch.category20 .category11 .dot{fill:#8c564b;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category11 path{fill:#8c564b;}.epoch.category20 .bar.category11{fill:#8c564b;}.epoch.category20 div.ref.category12{background-color:#c49c94;}.epoch.category20 .category12 .line{stroke:#c49c94;}.epoch.category20 .category12 .area,.epoch.category20 .category12 .dot{fill:#c49c94;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category12 path{fill:#c49c94;}.epoch.category20 .bar.category12{fill:#c49c94;}.epoch.category20 div.ref.category13{background-color:#e377c2;}.epoch.category20 .category13 .line{stroke:#e377c2;}.epoch.category20 .category13 .area,.epoch.category20 .category13 .dot{fill:#e377c2;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category13 
path{fill:#e377c2;}.epoch.category20 .bar.category13{fill:#e377c2;}.epoch.category20 div.ref.category14{background-color:#f7b6d2;}.epoch.category20 .category14 .line{stroke:#f7b6d2;}.epoch.category20 .category14 .area,.epoch.category20 .category14 .dot{fill:#f7b6d2;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category14 path{fill:#f7b6d2;}.epoch.category20 .bar.category14{fill:#f7b6d2;}.epoch.category20 div.ref.category15{background-color:#7f7f7f;}.epoch.category20 .category15 .line{stroke:#7f7f7f;}.epoch.category20 .category15 .area,.epoch.category20 .category15 .dot{fill:#7f7f7f;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category15 path{fill:#7f7f7f;}.epoch.category20 .bar.category15{fill:#7f7f7f;}.epoch.category20 div.ref.category16{background-color:#c7c7c7;}.epoch.category20 .category16 .line{stroke:#c7c7c7;}.epoch.category20 .category16 .area,.epoch.category20 .category16 .dot{fill:#c7c7c7;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category16 path{fill:#c7c7c7;}.epoch.category20 .bar.category16{fill:#c7c7c7;}.epoch.category20 div.ref.category17{background-color:#bcbd22;}.epoch.category20 .category17 .line{stroke:#bcbd22;}.epoch.category20 .category17 .area,.epoch.category20 .category17 .dot{fill:#bcbd22;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category17 path{fill:#bcbd22;}.epoch.category20 .bar.category17{fill:#bcbd22;}.epoch.category20 div.ref.category18{background-color:#dbdb8d;}.epoch.category20 .category18 .line{stroke:#dbdb8d;}.epoch.category20 .category18 .area,.epoch.category20 .category18 .dot{fill:#dbdb8d;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category18 path{fill:#dbdb8d;}.epoch.category20 .bar.category18{fill:#dbdb8d;}.epoch.category20 div.ref.category19{background-color:#17becf;}.epoch.category20 .category19 .line{stroke:#17becf;}.epoch.category20 .category19 .area,.epoch.category20 .category19 .dot{fill:#17becf;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category19 path{fill:#17becf;}.epoch.category20 .bar.category19{fill:#17becf;}.epoch.category20 div.ref.category20{background-color:#9edae5;}.epoch.category20 .category20 .line{stroke:#9edae5;}.epoch.category20 .category20 .area,.epoch.category20 .category20 .dot{fill:#9edae5;stroke:rgba(0, 0, 0, 0);}.epoch.category20 .arc.category20 path{fill:#9edae5;}.epoch.category20 .bar.category20{fill:#9edae5;}.epoch.category20b div.ref.category1{background-color:#393b79;}.epoch.category20b .category1 .line{stroke:#393b79;}.epoch.category20b .category1 .area,.epoch.category20b .category1 .dot{fill:#393b79;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category1 path{fill:#393b79;}.epoch.category20b .bar.category1{fill:#393b79;}.epoch.category20b div.ref.category2{background-color:#5254a3;}.epoch.category20b .category2 .line{stroke:#5254a3;}.epoch.category20b .category2 .area,.epoch.category20b .category2 .dot{fill:#5254a3;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category2 path{fill:#5254a3;}.epoch.category20b .bar.category2{fill:#5254a3;}.epoch.category20b div.ref.category3{background-color:#6b6ecf;}.epoch.category20b .category3 .line{stroke:#6b6ecf;}.epoch.category20b .category3 .area,.epoch.category20b .category3 .dot{fill:#6b6ecf;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category3 path{fill:#6b6ecf;}.epoch.category20b .bar.category3{fill:#6b6ecf;}.epoch.category20b div.ref.category4{background-color:#9c9ede;}.epoch.category20b .category4 .line{stroke:#9c9ede;}.epoch.category20b .category4 .area,.epoch.category20b .category4 .dot{fill:#9c9ede;stroke:rgba(0, 0, 0, 
0);}.epoch.category20b .arc.category4 path{fill:#9c9ede;}.epoch.category20b .bar.category4{fill:#9c9ede;}.epoch.category20b div.ref.category5{background-color:#637939;}.epoch.category20b .category5 .line{stroke:#637939;}.epoch.category20b .category5 .area,.epoch.category20b .category5 .dot{fill:#637939;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category5 path{fill:#637939;}.epoch.category20b .bar.category5{fill:#637939;}.epoch.category20b div.ref.category6{background-color:#8ca252;}.epoch.category20b .category6 .line{stroke:#8ca252;}.epoch.category20b .category6 .area,.epoch.category20b .category6 .dot{fill:#8ca252;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category6 path{fill:#8ca252;}.epoch.category20b .bar.category6{fill:#8ca252;}.epoch.category20b div.ref.category7{background-color:#b5cf6b;}.epoch.category20b .category7 .line{stroke:#b5cf6b;}.epoch.category20b .category7 .area,.epoch.category20b .category7 .dot{fill:#b5cf6b;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category7 path{fill:#b5cf6b;}.epoch.category20b .bar.category7{fill:#b5cf6b;}.epoch.category20b div.ref.category8{background-color:#cedb9c;}.epoch.category20b .category8 .line{stroke:#cedb9c;}.epoch.category20b .category8 .area,.epoch.category20b .category8 .dot{fill:#cedb9c;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category8 path{fill:#cedb9c;}.epoch.category20b .bar.category8{fill:#cedb9c;}.epoch.category20b div.ref.category9{background-color:#8c6d31;}.epoch.category20b .category9 .line{stroke:#8c6d31;}.epoch.category20b .category9 .area,.epoch.category20b .category9 .dot{fill:#8c6d31;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category9 path{fill:#8c6d31;}.epoch.category20b .bar.category9{fill:#8c6d31;}.epoch.category20b div.ref.category10{background-color:#bd9e39;}.epoch.category20b .category10 .line{stroke:#bd9e39;}.epoch.category20b .category10 .area,.epoch.category20b .category10 .dot{fill:#bd9e39;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category10 path{fill:#bd9e39;}.epoch.category20b .bar.category10{fill:#bd9e39;}.epoch.category20b div.ref.category11{background-color:#e7ba52;}.epoch.category20b .category11 .line{stroke:#e7ba52;}.epoch.category20b .category11 .area,.epoch.category20b .category11 .dot{fill:#e7ba52;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category11 path{fill:#e7ba52;}.epoch.category20b .bar.category11{fill:#e7ba52;}.epoch.category20b div.ref.category12{background-color:#e7cb94;}.epoch.category20b .category12 .line{stroke:#e7cb94;}.epoch.category20b .category12 .area,.epoch.category20b .category12 .dot{fill:#e7cb94;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category12 path{fill:#e7cb94;}.epoch.category20b .bar.category12{fill:#e7cb94;}.epoch.category20b div.ref.category13{background-color:#843c39;}.epoch.category20b .category13 .line{stroke:#843c39;}.epoch.category20b .category13 .area,.epoch.category20b .category13 .dot{fill:#843c39;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category13 path{fill:#843c39;}.epoch.category20b .bar.category13{fill:#843c39;}.epoch.category20b div.ref.category14{background-color:#ad494a;}.epoch.category20b .category14 .line{stroke:#ad494a;}.epoch.category20b .category14 .area,.epoch.category20b .category14 .dot{fill:#ad494a;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category14 path{fill:#ad494a;}.epoch.category20b .bar.category14{fill:#ad494a;}.epoch.category20b div.ref.category15{background-color:#d6616b;}.epoch.category20b .category15 .line{stroke:#d6616b;}.epoch.category20b .category15 
.area,.epoch.category20b .category15 .dot{fill:#d6616b;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category15 path{fill:#d6616b;}.epoch.category20b .bar.category15{fill:#d6616b;}.epoch.category20b div.ref.category16{background-color:#e7969c;}.epoch.category20b .category16 .line{stroke:#e7969c;}.epoch.category20b .category16 .area,.epoch.category20b .category16 .dot{fill:#e7969c;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category16 path{fill:#e7969c;}.epoch.category20b .bar.category16{fill:#e7969c;}.epoch.category20b div.ref.category17{background-color:#7b4173;}.epoch.category20b .category17 .line{stroke:#7b4173;}.epoch.category20b .category17 .area,.epoch.category20b .category17 .dot{fill:#7b4173;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category17 path{fill:#7b4173;}.epoch.category20b .bar.category17{fill:#7b4173;}.epoch.category20b div.ref.category18{background-color:#a55194;}.epoch.category20b .category18 .line{stroke:#a55194;}.epoch.category20b .category18 .area,.epoch.category20b .category18 .dot{fill:#a55194;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category18 path{fill:#a55194;}.epoch.category20b .bar.category18{fill:#a55194;}.epoch.category20b div.ref.category19{background-color:#ce6dbd;}.epoch.category20b .category19 .line{stroke:#ce6dbd;}.epoch.category20b .category19 .area,.epoch.category20b .category19 .dot{fill:#ce6dbd;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category19 path{fill:#ce6dbd;}.epoch.category20b .bar.category19{fill:#ce6dbd;}.epoch.category20b div.ref.category20{background-color:#de9ed6;}.epoch.category20b .category20 .line{stroke:#de9ed6;}.epoch.category20b .category20 .area,.epoch.category20b .category20 .dot{fill:#de9ed6;stroke:rgba(0, 0, 0, 0);}.epoch.category20b .arc.category20 path{fill:#de9ed6;}.epoch.category20b .bar.category20{fill:#de9ed6;}.epoch.category20c div.ref.category1{background-color:#3182bd;}.epoch.category20c .category1 .line{stroke:#3182bd;}.epoch.category20c .category1 .area,.epoch.category20c .category1 .dot{fill:#3182bd;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category1 path{fill:#3182bd;}.epoch.category20c .bar.category1{fill:#3182bd;}.epoch.category20c div.ref.category2{background-color:#6baed6;}.epoch.category20c .category2 .line{stroke:#6baed6;}.epoch.category20c .category2 .area,.epoch.category20c .category2 .dot{fill:#6baed6;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category2 path{fill:#6baed6;}.epoch.category20c .bar.category2{fill:#6baed6;}.epoch.category20c div.ref.category3{background-color:#9ecae1;}.epoch.category20c .category3 .line{stroke:#9ecae1;}.epoch.category20c .category3 .area,.epoch.category20c .category3 .dot{fill:#9ecae1;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category3 path{fill:#9ecae1;}.epoch.category20c .bar.category3{fill:#9ecae1;}.epoch.category20c div.ref.category4{background-color:#c6dbef;}.epoch.category20c .category4 .line{stroke:#c6dbef;}.epoch.category20c .category4 .area,.epoch.category20c .category4 .dot{fill:#c6dbef;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category4 path{fill:#c6dbef;}.epoch.category20c .bar.category4{fill:#c6dbef;}.epoch.category20c div.ref.category5{background-color:#e6550d;}.epoch.category20c .category5 .line{stroke:#e6550d;}.epoch.category20c .category5 .area,.epoch.category20c .category5 .dot{fill:#e6550d;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category5 path{fill:#e6550d;}.epoch.category20c .bar.category5{fill:#e6550d;}.epoch.category20c div.ref.category6{background-color:#fd8d3c;}.epoch.category20c 
.category6 .line{stroke:#fd8d3c;}.epoch.category20c .category6 .area,.epoch.category20c .category6 .dot{fill:#fd8d3c;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category6 path{fill:#fd8d3c;}.epoch.category20c .bar.category6{fill:#fd8d3c;}.epoch.category20c div.ref.category7{background-color:#fdae6b;}.epoch.category20c .category7 .line{stroke:#fdae6b;}.epoch.category20c .category7 .area,.epoch.category20c .category7 .dot{fill:#fdae6b;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category7 path{fill:#fdae6b;}.epoch.category20c .bar.category7{fill:#fdae6b;}.epoch.category20c div.ref.category8{background-color:#fdd0a2;}.epoch.category20c .category8 .line{stroke:#fdd0a2;}.epoch.category20c .category8 .area,.epoch.category20c .category8 .dot{fill:#fdd0a2;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category8 path{fill:#fdd0a2;}.epoch.category20c .bar.category8{fill:#fdd0a2;}.epoch.category20c div.ref.category9{background-color:#31a354;}.epoch.category20c .category9 .line{stroke:#31a354;}.epoch.category20c .category9 .area,.epoch.category20c .category9 .dot{fill:#31a354;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category9 path{fill:#31a354;}.epoch.category20c .bar.category9{fill:#31a354;}.epoch.category20c div.ref.category10{background-color:#74c476;}.epoch.category20c .category10 .line{stroke:#74c476;}.epoch.category20c .category10 .area,.epoch.category20c .category10 .dot{fill:#74c476;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category10 path{fill:#74c476;}.epoch.category20c .bar.category10{fill:#74c476;}.epoch.category20c div.ref.category11{background-color:#a1d99b;}.epoch.category20c .category11 .line{stroke:#a1d99b;}.epoch.category20c .category11 .area,.epoch.category20c .category11 .dot{fill:#a1d99b;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category11 path{fill:#a1d99b;}.epoch.category20c .bar.category11{fill:#a1d99b;}.epoch.category20c div.ref.category12{background-color:#c7e9c0;}.epoch.category20c .category12 .line{stroke:#c7e9c0;}.epoch.category20c .category12 .area,.epoch.category20c .category12 .dot{fill:#c7e9c0;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category12 path{fill:#c7e9c0;}.epoch.category20c .bar.category12{fill:#c7e9c0;}.epoch.category20c div.ref.category13{background-color:#756bb1;}.epoch.category20c .category13 .line{stroke:#756bb1;}.epoch.category20c .category13 .area,.epoch.category20c .category13 .dot{fill:#756bb1;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category13 path{fill:#756bb1;}.epoch.category20c .bar.category13{fill:#756bb1;}.epoch.category20c div.ref.category14{background-color:#9e9ac8;}.epoch.category20c .category14 .line{stroke:#9e9ac8;}.epoch.category20c .category14 .area,.epoch.category20c .category14 .dot{fill:#9e9ac8;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category14 path{fill:#9e9ac8;}.epoch.category20c .bar.category14{fill:#9e9ac8;}.epoch.category20c div.ref.category15{background-color:#bcbddc;}.epoch.category20c .category15 .line{stroke:#bcbddc;}.epoch.category20c .category15 .area,.epoch.category20c .category15 .dot{fill:#bcbddc;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category15 path{fill:#bcbddc;}.epoch.category20c .bar.category15{fill:#bcbddc;}.epoch.category20c div.ref.category16{background-color:#dadaeb;}.epoch.category20c .category16 .line{stroke:#dadaeb;}.epoch.category20c .category16 .area,.epoch.category20c .category16 .dot{fill:#dadaeb;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category16 path{fill:#dadaeb;}.epoch.category20c .bar.category16{fill:#dadaeb;}.epoch.category20c 
div.ref.category17{background-color:#636363;}.epoch.category20c .category17 .line{stroke:#636363;}.epoch.category20c .category17 .area,.epoch.category20c .category17 .dot{fill:#636363;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category17 path{fill:#636363;}.epoch.category20c .bar.category17{fill:#636363;}.epoch.category20c div.ref.category18{background-color:#969696;}.epoch.category20c .category18 .line{stroke:#969696;}.epoch.category20c .category18 .area,.epoch.category20c .category18 .dot{fill:#969696;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category18 path{fill:#969696;}.epoch.category20c .bar.category18{fill:#969696;}.epoch.category20c div.ref.category19{background-color:#bdbdbd;}.epoch.category20c .category19 .line{stroke:#bdbdbd;}.epoch.category20c .category19 .area,.epoch.category20c .category19 .dot{fill:#bdbdbd;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category19 path{fill:#bdbdbd;}.epoch.category20c .bar.category19{fill:#bdbdbd;}.epoch.category20c div.ref.category20{background-color:#d9d9d9;}.epoch.category20c .category20 .line{stroke:#d9d9d9;}.epoch.category20c .category20 .area,.epoch.category20c .category20 .dot{fill:#d9d9d9;stroke:rgba(0, 0, 0, 0);}.epoch.category20c .arc.category20 path{fill:#d9d9d9;}.epoch.category20c .bar.category20{fill:#d9d9d9;}.epoch .category1 .bucket,.epoch.heatmap5 .category1 .bucket{fill:#1f77b4;}.epoch .category2 .bucket,.epoch.heatmap5 .category2 .bucket{fill:#2ca02c;}.epoch .category3 .bucket,.epoch.heatmap5 .category3 .bucket{fill:#d62728;}.epoch .category4 .bucket,.epoch.heatmap5 .category4 .bucket{fill:#8c564b;}.epoch .category5 .bucket,.epoch.heatmap5 .category5 .bucket{fill:#7f7f7f;}.epoch-theme-dark .epoch .axis path,.epoch-theme-dark .epoch .axis line{stroke:#d0d0d0;}.epoch-theme-dark .epoch .axis .tick text{fill:#d0d0d0;}.epoch-theme-dark .arc.pie{stroke:#333;}.epoch-theme-dark .arc.pie text{fill:#333;}.epoch-theme-dark .epoch .gauge-labels .value{fill:#BBB;}.epoch-theme-dark .epoch .gauge .arc.outer{stroke:#999;}.epoch-theme-dark .epoch .gauge .arc.inner{stroke:#AAA;}.epoch-theme-dark .epoch .gauge .tick{stroke:#AAA;}.epoch-theme-dark .epoch .gauge .needle{fill:#F3DE88;}.epoch-theme-dark .epoch .gauge .needle-base{fill:#999;}.epoch-theme-dark .epoch div.ref.category1,.epoch-theme-dark .epoch.category10 div.ref.category1{background-color:#909CFF;}.epoch-theme-dark .epoch .category1 .line,.epoch-theme-dark .epoch.category10 .category1 .line{stroke:#909CFF;}.epoch-theme-dark .epoch .category1 .area,.epoch-theme-dark .epoch .category1 .dot,.epoch-theme-dark .epoch.category10 .category1 .area,.epoch-theme-dark .epoch.category10 .category1 .dot{fill:#909CFF;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch .arc.category1 path,.epoch-theme-dark .epoch.category10 .arc.category1 path{fill:#909CFF;}.epoch-theme-dark .epoch .bar.category1,.epoch-theme-dark .epoch.category10 .bar.category1{fill:#909CFF;}.epoch-theme-dark .epoch div.ref.category2,.epoch-theme-dark .epoch.category10 div.ref.category2{background-color:#FFAC89;}.epoch-theme-dark .epoch .category2 .line,.epoch-theme-dark .epoch.category10 .category2 .line{stroke:#FFAC89;}.epoch-theme-dark .epoch .category2 .area,.epoch-theme-dark .epoch .category2 .dot,.epoch-theme-dark .epoch.category10 .category2 .area,.epoch-theme-dark .epoch.category10 .category2 .dot{fill:#FFAC89;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch .arc.category2 path,.epoch-theme-dark .epoch.category10 .arc.category2 path{fill:#FFAC89;}.epoch-theme-dark .epoch .bar.category2,.epoch-theme-dark 
.epoch.category10 .bar.category2{fill:#FFAC89;}.epoch-theme-dark .epoch div.ref.category3,.epoch-theme-dark .epoch.category10 div.ref.category3{background-color:#E889E8;}.epoch-theme-dark .epoch .category3 .line,.epoch-theme-dark .epoch.category10 .category3 .line{stroke:#E889E8;}.epoch-theme-dark .epoch .category3 .area,.epoch-theme-dark .epoch .category3 .dot,.epoch-theme-dark .epoch.category10 .category3 .area,.epoch-theme-dark .epoch.category10 .category3 .dot{fill:#E889E8;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch .arc.category3 path,.epoch-theme-dark .epoch.category10 .arc.category3 path{fill:#E889E8;}.epoch-theme-dark .epoch .bar.category3,.epoch-theme-dark .epoch.category10 .bar.category3{fill:#E889E8;}.epoch-theme-dark .epoch div.ref.category4,.epoch-theme-dark .epoch.category10 div.ref.category4{background-color:#78E8D3;}.epoch-theme-dark .epoch .category4 .line,.epoch-theme-dark .epoch.category10 .category4 .line{stroke:#78E8D3;}.epoch-theme-dark .epoch .category4 .area,.epoch-theme-dark .epoch .category4 .dot,.epoch-theme-dark .epoch.category10 .category4 .area,.epoch-theme-dark .epoch.category10 .category4 .dot{fill:#78E8D3;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch .arc.category4 path,.epoch-theme-dark .epoch.category10 .arc.category4 path{fill:#78E8D3;}.epoch-theme-dark .epoch .bar.category4,.epoch-theme-dark .epoch.category10 .bar.category4{fill:#78E8D3;}.epoch-theme-dark .epoch div.ref.category5,.epoch-theme-dark .epoch.category10 div.ref.category5{background-color:#C2FF97;}.epoch-theme-dark .epoch .category5 .line,.epoch-theme-dark .epoch.category10 .category5 .line{stroke:#C2FF97;}.epoch-theme-dark .epoch .category5 .area,.epoch-theme-dark .epoch .category5 .dot,.epoch-theme-dark .epoch.category10 .category5 .area,.epoch-theme-dark .epoch.category10 .category5 .dot{fill:#C2FF97;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch .arc.category5 path,.epoch-theme-dark .epoch.category10 .arc.category5 path{fill:#C2FF97;}.epoch-theme-dark .epoch .bar.category5,.epoch-theme-dark .epoch.category10 .bar.category5{fill:#C2FF97;}.epoch-theme-dark .epoch div.ref.category6,.epoch-theme-dark .epoch.category10 div.ref.category6{background-color:#B7BCD1;}.epoch-theme-dark .epoch .category6 .line,.epoch-theme-dark .epoch.category10 .category6 .line{stroke:#B7BCD1;}.epoch-theme-dark .epoch .category6 .area,.epoch-theme-dark .epoch .category6 .dot,.epoch-theme-dark .epoch.category10 .category6 .area,.epoch-theme-dark .epoch.category10 .category6 .dot{fill:#B7BCD1;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch .arc.category6 path,.epoch-theme-dark .epoch.category10 .arc.category6 path{fill:#B7BCD1;}.epoch-theme-dark .epoch .bar.category6,.epoch-theme-dark .epoch.category10 .bar.category6{fill:#B7BCD1;}.epoch-theme-dark .epoch div.ref.category7,.epoch-theme-dark .epoch.category10 div.ref.category7{background-color:#FF857F;}.epoch-theme-dark .epoch .category7 .line,.epoch-theme-dark .epoch.category10 .category7 .line{stroke:#FF857F;}.epoch-theme-dark .epoch .category7 .area,.epoch-theme-dark .epoch .category7 .dot,.epoch-theme-dark .epoch.category10 .category7 .area,.epoch-theme-dark .epoch.category10 .category7 .dot{fill:#FF857F;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch .arc.category7 path,.epoch-theme-dark .epoch.category10 .arc.category7 path{fill:#FF857F;}.epoch-theme-dark .epoch .bar.category7,.epoch-theme-dark .epoch.category10 .bar.category7{fill:#FF857F;}.epoch-theme-dark .epoch div.ref.category8,.epoch-theme-dark .epoch.category10 
div.ref.category8{background-color:#F3DE88;}.epoch-theme-dark .epoch .category8 .line,.epoch-theme-dark .epoch.category10 .category8 .line{stroke:#F3DE88;}.epoch-theme-dark .epoch .category8 .area,.epoch-theme-dark .epoch .category8 .dot,.epoch-theme-dark .epoch.category10 .category8 .area,.epoch-theme-dark .epoch.category10 .category8 .dot{fill:#F3DE88;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch .arc.category8 path,.epoch-theme-dark .epoch.category10 .arc.category8 path{fill:#F3DE88;}.epoch-theme-dark .epoch .bar.category8,.epoch-theme-dark .epoch.category10 .bar.category8{fill:#F3DE88;}.epoch-theme-dark .epoch div.ref.category9,.epoch-theme-dark .epoch.category10 div.ref.category9{background-color:#C9935E;}.epoch-theme-dark .epoch .category9 .line,.epoch-theme-dark .epoch.category10 .category9 .line{stroke:#C9935E;}.epoch-theme-dark .epoch .category9 .area,.epoch-theme-dark .epoch .category9 .dot,.epoch-theme-dark .epoch.category10 .category9 .area,.epoch-theme-dark .epoch.category10 .category9 .dot{fill:#C9935E;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch .arc.category9 path,.epoch-theme-dark .epoch.category10 .arc.category9 path{fill:#C9935E;}.epoch-theme-dark .epoch .bar.category9,.epoch-theme-dark .epoch.category10 .bar.category9{fill:#C9935E;}.epoch-theme-dark .epoch div.ref.category10,.epoch-theme-dark .epoch.category10 div.ref.category10{background-color:#A488FF;}.epoch-theme-dark .epoch .category10 .line,.epoch-theme-dark .epoch.category10 .category10 .line{stroke:#A488FF;}.epoch-theme-dark .epoch .category10 .area,.epoch-theme-dark .epoch .category10 .dot,.epoch-theme-dark .epoch.category10 .category10 .area,.epoch-theme-dark .epoch.category10 .category10 .dot{fill:#A488FF;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch .arc.category10 path,.epoch-theme-dark .epoch.category10 .arc.category10 path{fill:#A488FF;}.epoch-theme-dark .epoch .bar.category10,.epoch-theme-dark .epoch.category10 .bar.category10{fill:#A488FF;}.epoch-theme-dark .epoch.category20 div.ref.category1{background-color:#909CFF;}.epoch-theme-dark .epoch.category20 .category1 .line{stroke:#909CFF;}.epoch-theme-dark .epoch.category20 .category1 .area,.epoch-theme-dark .epoch.category20 .category1 .dot{fill:#909CFF;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category1 path{fill:#909CFF;}.epoch-theme-dark .epoch.category20 .bar.category1{fill:#909CFF;}.epoch-theme-dark .epoch.category20 div.ref.category2{background-color:#626AAD;}.epoch-theme-dark .epoch.category20 .category2 .line{stroke:#626AAD;}.epoch-theme-dark .epoch.category20 .category2 .area,.epoch-theme-dark .epoch.category20 .category2 .dot{fill:#626AAD;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category2 path{fill:#626AAD;}.epoch-theme-dark .epoch.category20 .bar.category2{fill:#626AAD;}.epoch-theme-dark .epoch.category20 div.ref.category3{background-color:#FFAC89;}.epoch-theme-dark .epoch.category20 .category3 .line{stroke:#FFAC89;}.epoch-theme-dark .epoch.category20 .category3 .area,.epoch-theme-dark .epoch.category20 .category3 .dot{fill:#FFAC89;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category3 path{fill:#FFAC89;}.epoch-theme-dark .epoch.category20 .bar.category3{fill:#FFAC89;}.epoch-theme-dark .epoch.category20 div.ref.category4{background-color:#BD7F66;}.epoch-theme-dark .epoch.category20 .category4 .line{stroke:#BD7F66;}.epoch-theme-dark .epoch.category20 .category4 .area,.epoch-theme-dark .epoch.category20 .category4 .dot{fill:#BD7F66;stroke:rgba(0, 0, 0, 
0);}.epoch-theme-dark .epoch.category20 .arc.category4 path{fill:#BD7F66;}.epoch-theme-dark .epoch.category20 .bar.category4{fill:#BD7F66;}.epoch-theme-dark .epoch.category20 div.ref.category5{background-color:#E889E8;}.epoch-theme-dark .epoch.category20 .category5 .line{stroke:#E889E8;}.epoch-theme-dark .epoch.category20 .category5 .area,.epoch-theme-dark .epoch.category20 .category5 .dot{fill:#E889E8;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category5 path{fill:#E889E8;}.epoch-theme-dark .epoch.category20 .bar.category5{fill:#E889E8;}.epoch-theme-dark .epoch.category20 div.ref.category6{background-color:#995A99;}.epoch-theme-dark .epoch.category20 .category6 .line{stroke:#995A99;}.epoch-theme-dark .epoch.category20 .category6 .area,.epoch-theme-dark .epoch.category20 .category6 .dot{fill:#995A99;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category6 path{fill:#995A99;}.epoch-theme-dark .epoch.category20 .bar.category6{fill:#995A99;}.epoch-theme-dark .epoch.category20 div.ref.category7{background-color:#78E8D3;}.epoch-theme-dark .epoch.category20 .category7 .line{stroke:#78E8D3;}.epoch-theme-dark .epoch.category20 .category7 .area,.epoch-theme-dark .epoch.category20 .category7 .dot{fill:#78E8D3;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category7 path{fill:#78E8D3;}.epoch-theme-dark .epoch.category20 .bar.category7{fill:#78E8D3;}.epoch-theme-dark .epoch.category20 div.ref.category8{background-color:#4F998C;}.epoch-theme-dark .epoch.category20 .category8 .line{stroke:#4F998C;}.epoch-theme-dark .epoch.category20 .category8 .area,.epoch-theme-dark .epoch.category20 .category8 .dot{fill:#4F998C;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category8 path{fill:#4F998C;}.epoch-theme-dark .epoch.category20 .bar.category8{fill:#4F998C;}.epoch-theme-dark .epoch.category20 div.ref.category9{background-color:#C2FF97;}.epoch-theme-dark .epoch.category20 .category9 .line{stroke:#C2FF97;}.epoch-theme-dark .epoch.category20 .category9 .area,.epoch-theme-dark .epoch.category20 .category9 .dot{fill:#C2FF97;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category9 path{fill:#C2FF97;}.epoch-theme-dark .epoch.category20 .bar.category9{fill:#C2FF97;}.epoch-theme-dark .epoch.category20 div.ref.category10{background-color:#789E5E;}.epoch-theme-dark .epoch.category20 .category10 .line{stroke:#789E5E;}.epoch-theme-dark .epoch.category20 .category10 .area,.epoch-theme-dark .epoch.category20 .category10 .dot{fill:#789E5E;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category10 path{fill:#789E5E;}.epoch-theme-dark .epoch.category20 .bar.category10{fill:#789E5E;}.epoch-theme-dark .epoch.category20 div.ref.category11{background-color:#B7BCD1;}.epoch-theme-dark .epoch.category20 .category11 .line{stroke:#B7BCD1;}.epoch-theme-dark .epoch.category20 .category11 .area,.epoch-theme-dark .epoch.category20 .category11 .dot{fill:#B7BCD1;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category11 path{fill:#B7BCD1;}.epoch-theme-dark .epoch.category20 .bar.category11{fill:#B7BCD1;}.epoch-theme-dark .epoch.category20 div.ref.category12{background-color:#7F8391;}.epoch-theme-dark .epoch.category20 .category12 .line{stroke:#7F8391;}.epoch-theme-dark .epoch.category20 .category12 .area,.epoch-theme-dark .epoch.category20 .category12 .dot{fill:#7F8391;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category12 path{fill:#7F8391;}.epoch-theme-dark .epoch.category20 
.bar.category12{fill:#7F8391;}.epoch-theme-dark .epoch.category20 div.ref.category13{background-color:#CCB889;}.epoch-theme-dark .epoch.category20 .category13 .line{stroke:#CCB889;}.epoch-theme-dark .epoch.category20 .category13 .area,.epoch-theme-dark .epoch.category20 .category13 .dot{fill:#CCB889;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category13 path{fill:#CCB889;}.epoch-theme-dark .epoch.category20 .bar.category13{fill:#CCB889;}.epoch-theme-dark .epoch.category20 div.ref.category14{background-color:#A1906B;}.epoch-theme-dark .epoch.category20 .category14 .line{stroke:#A1906B;}.epoch-theme-dark .epoch.category20 .category14 .area,.epoch-theme-dark .epoch.category20 .category14 .dot{fill:#A1906B;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category14 path{fill:#A1906B;}.epoch-theme-dark .epoch.category20 .bar.category14{fill:#A1906B;}.epoch-theme-dark .epoch.category20 div.ref.category15{background-color:#F3DE88;}.epoch-theme-dark .epoch.category20 .category15 .line{stroke:#F3DE88;}.epoch-theme-dark .epoch.category20 .category15 .area,.epoch-theme-dark .epoch.category20 .category15 .dot{fill:#F3DE88;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category15 path{fill:#F3DE88;}.epoch-theme-dark .epoch.category20 .bar.category15{fill:#F3DE88;}.epoch-theme-dark .epoch.category20 div.ref.category16{background-color:#A89A5E;}.epoch-theme-dark .epoch.category20 .category16 .line{stroke:#A89A5E;}.epoch-theme-dark .epoch.category20 .category16 .area,.epoch-theme-dark .epoch.category20 .category16 .dot{fill:#A89A5E;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category16 path{fill:#A89A5E;}.epoch-theme-dark .epoch.category20 .bar.category16{fill:#A89A5E;}.epoch-theme-dark .epoch.category20 div.ref.category17{background-color:#FF857F;}.epoch-theme-dark .epoch.category20 .category17 .line{stroke:#FF857F;}.epoch-theme-dark .epoch.category20 .category17 .area,.epoch-theme-dark .epoch.category20 .category17 .dot{fill:#FF857F;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category17 path{fill:#FF857F;}.epoch-theme-dark .epoch.category20 .bar.category17{fill:#FF857F;}.epoch-theme-dark .epoch.category20 div.ref.category18{background-color:#BA615D;}.epoch-theme-dark .epoch.category20 .category18 .line{stroke:#BA615D;}.epoch-theme-dark .epoch.category20 .category18 .area,.epoch-theme-dark .epoch.category20 .category18 .dot{fill:#BA615D;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category18 path{fill:#BA615D;}.epoch-theme-dark .epoch.category20 .bar.category18{fill:#BA615D;}.epoch-theme-dark .epoch.category20 div.ref.category19{background-color:#A488FF;}.epoch-theme-dark .epoch.category20 .category19 .line{stroke:#A488FF;}.epoch-theme-dark .epoch.category20 .category19 .area,.epoch-theme-dark .epoch.category20 .category19 .dot{fill:#A488FF;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category19 path{fill:#A488FF;}.epoch-theme-dark .epoch.category20 .bar.category19{fill:#A488FF;}.epoch-theme-dark .epoch.category20 div.ref.category20{background-color:#7662B8;}.epoch-theme-dark .epoch.category20 .category20 .line{stroke:#7662B8;}.epoch-theme-dark .epoch.category20 .category20 .area,.epoch-theme-dark .epoch.category20 .category20 .dot{fill:#7662B8;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20 .arc.category20 path{fill:#7662B8;}.epoch-theme-dark .epoch.category20 .bar.category20{fill:#7662B8;}.epoch-theme-dark .epoch.category20b 
div.ref.category1{background-color:#909CFF;}.epoch-theme-dark .epoch.category20b .category1 .line{stroke:#909CFF;}.epoch-theme-dark .epoch.category20b .category1 .area,.epoch-theme-dark .epoch.category20b .category1 .dot{fill:#909CFF;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category1 path{fill:#909CFF;}.epoch-theme-dark .epoch.category20b .bar.category1{fill:#909CFF;}.epoch-theme-dark .epoch.category20b div.ref.category2{background-color:#7680D1;}.epoch-theme-dark .epoch.category20b .category2 .line{stroke:#7680D1;}.epoch-theme-dark .epoch.category20b .category2 .area,.epoch-theme-dark .epoch.category20b .category2 .dot{fill:#7680D1;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category2 path{fill:#7680D1;}.epoch-theme-dark .epoch.category20b .bar.category2{fill:#7680D1;}.epoch-theme-dark .epoch.category20b div.ref.category3{background-color:#656DB2;}.epoch-theme-dark .epoch.category20b .category3 .line{stroke:#656DB2;}.epoch-theme-dark .epoch.category20b .category3 .area,.epoch-theme-dark .epoch.category20b .category3 .dot{fill:#656DB2;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category3 path{fill:#656DB2;}.epoch-theme-dark .epoch.category20b .bar.category3{fill:#656DB2;}.epoch-theme-dark .epoch.category20b div.ref.category4{background-color:#525992;}.epoch-theme-dark .epoch.category20b .category4 .line{stroke:#525992;}.epoch-theme-dark .epoch.category20b .category4 .area,.epoch-theme-dark .epoch.category20b .category4 .dot{fill:#525992;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category4 path{fill:#525992;}.epoch-theme-dark .epoch.category20b .bar.category4{fill:#525992;}.epoch-theme-dark .epoch.category20b div.ref.category5{background-color:#FFAC89;}.epoch-theme-dark .epoch.category20b .category5 .line{stroke:#FFAC89;}.epoch-theme-dark .epoch.category20b .category5 .area,.epoch-theme-dark .epoch.category20b .category5 .dot{fill:#FFAC89;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category5 path{fill:#FFAC89;}.epoch-theme-dark .epoch.category20b .bar.category5{fill:#FFAC89;}.epoch-theme-dark .epoch.category20b div.ref.category6{background-color:#D18D71;}.epoch-theme-dark .epoch.category20b .category6 .line{stroke:#D18D71;}.epoch-theme-dark .epoch.category20b .category6 .area,.epoch-theme-dark .epoch.category20b .category6 .dot{fill:#D18D71;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category6 path{fill:#D18D71;}.epoch-theme-dark .epoch.category20b .bar.category6{fill:#D18D71;}.epoch-theme-dark .epoch.category20b div.ref.category7{background-color:#AB735C;}.epoch-theme-dark .epoch.category20b .category7 .line{stroke:#AB735C;}.epoch-theme-dark .epoch.category20b .category7 .area,.epoch-theme-dark .epoch.category20b .category7 .dot{fill:#AB735C;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category7 path{fill:#AB735C;}.epoch-theme-dark .epoch.category20b .bar.category7{fill:#AB735C;}.epoch-theme-dark .epoch.category20b div.ref.category8{background-color:#92624E;}.epoch-theme-dark .epoch.category20b .category8 .line{stroke:#92624E;}.epoch-theme-dark .epoch.category20b .category8 .area,.epoch-theme-dark .epoch.category20b .category8 .dot{fill:#92624E;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category8 path{fill:#92624E;}.epoch-theme-dark .epoch.category20b .bar.category8{fill:#92624E;}.epoch-theme-dark .epoch.category20b div.ref.category9{background-color:#E889E8;}.epoch-theme-dark .epoch.category20b 
.category9 .line{stroke:#E889E8;}.epoch-theme-dark .epoch.category20b .category9 .area,.epoch-theme-dark .epoch.category20b .category9 .dot{fill:#E889E8;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category9 path{fill:#E889E8;}.epoch-theme-dark .epoch.category20b .bar.category9{fill:#E889E8;}.epoch-theme-dark .epoch.category20b div.ref.category10{background-color:#BA6EBA;}.epoch-theme-dark .epoch.category20b .category10 .line{stroke:#BA6EBA;}.epoch-theme-dark .epoch.category20b .category10 .area,.epoch-theme-dark .epoch.category20b .category10 .dot{fill:#BA6EBA;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category10 path{fill:#BA6EBA;}.epoch-theme-dark .epoch.category20b .bar.category10{fill:#BA6EBA;}.epoch-theme-dark .epoch.category20b div.ref.category11{background-color:#9B5C9B;}.epoch-theme-dark .epoch.category20b .category11 .line{stroke:#9B5C9B;}.epoch-theme-dark .epoch.category20b .category11 .area,.epoch-theme-dark .epoch.category20b .category11 .dot{fill:#9B5C9B;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category11 path{fill:#9B5C9B;}.epoch-theme-dark .epoch.category20b .bar.category11{fill:#9B5C9B;}.epoch-theme-dark .epoch.category20b div.ref.category12{background-color:#7B487B;}.epoch-theme-dark .epoch.category20b .category12 .line{stroke:#7B487B;}.epoch-theme-dark .epoch.category20b .category12 .area,.epoch-theme-dark .epoch.category20b .category12 .dot{fill:#7B487B;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category12 path{fill:#7B487B;}.epoch-theme-dark .epoch.category20b .bar.category12{fill:#7B487B;}.epoch-theme-dark .epoch.category20b div.ref.category13{background-color:#78E8D3;}.epoch-theme-dark .epoch.category20b .category13 .line{stroke:#78E8D3;}.epoch-theme-dark .epoch.category20b .category13 .area,.epoch-theme-dark .epoch.category20b .category13 .dot{fill:#78E8D3;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category13 path{fill:#78E8D3;}.epoch-theme-dark .epoch.category20b .bar.category13{fill:#78E8D3;}.epoch-theme-dark .epoch.category20b div.ref.category14{background-color:#60BAAA;}.epoch-theme-dark .epoch.category20b .category14 .line{stroke:#60BAAA;}.epoch-theme-dark .epoch.category20b .category14 .area,.epoch-theme-dark .epoch.category20b .category14 .dot{fill:#60BAAA;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category14 path{fill:#60BAAA;}.epoch-theme-dark .epoch.category20b .bar.category14{fill:#60BAAA;}.epoch-theme-dark .epoch.category20b div.ref.category15{background-color:#509B8D;}.epoch-theme-dark .epoch.category20b .category15 .line{stroke:#509B8D;}.epoch-theme-dark .epoch.category20b .category15 .area,.epoch-theme-dark .epoch.category20b .category15 .dot{fill:#509B8D;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category15 path{fill:#509B8D;}.epoch-theme-dark .epoch.category20b .bar.category15{fill:#509B8D;}.epoch-theme-dark .epoch.category20b div.ref.category16{background-color:#3F7B70;}.epoch-theme-dark .epoch.category20b .category16 .line{stroke:#3F7B70;}.epoch-theme-dark .epoch.category20b .category16 .area,.epoch-theme-dark .epoch.category20b .category16 .dot{fill:#3F7B70;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category16 path{fill:#3F7B70;}.epoch-theme-dark .epoch.category20b .bar.category16{fill:#3F7B70;}.epoch-theme-dark .epoch.category20b div.ref.category17{background-color:#C2FF97;}.epoch-theme-dark .epoch.category20b .category17 
.line{stroke:#C2FF97;}.epoch-theme-dark .epoch.category20b .category17 .area,.epoch-theme-dark .epoch.category20b .category17 .dot{fill:#C2FF97;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category17 path{fill:#C2FF97;}.epoch-theme-dark .epoch.category20b .bar.category17{fill:#C2FF97;}.epoch-theme-dark .epoch.category20b div.ref.category18{background-color:#9FD17C;}.epoch-theme-dark .epoch.category20b .category18 .line{stroke:#9FD17C;}.epoch-theme-dark .epoch.category20b .category18 .area,.epoch-theme-dark .epoch.category20b .category18 .dot{fill:#9FD17C;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category18 path{fill:#9FD17C;}.epoch-theme-dark .epoch.category20b .bar.category18{fill:#9FD17C;}.epoch-theme-dark .epoch.category20b div.ref.category19{background-color:#7DA361;}.epoch-theme-dark .epoch.category20b .category19 .line{stroke:#7DA361;}.epoch-theme-dark .epoch.category20b .category19 .area,.epoch-theme-dark .epoch.category20b .category19 .dot{fill:#7DA361;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category19 path{fill:#7DA361;}.epoch-theme-dark .epoch.category20b .bar.category19{fill:#7DA361;}.epoch-theme-dark .epoch.category20b div.ref.category20{background-color:#65854E;}.epoch-theme-dark .epoch.category20b .category20 .line{stroke:#65854E;}.epoch-theme-dark .epoch.category20b .category20 .area,.epoch-theme-dark .epoch.category20b .category20 .dot{fill:#65854E;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20b .arc.category20 path{fill:#65854E;}.epoch-theme-dark .epoch.category20b .bar.category20{fill:#65854E;}.epoch-theme-dark .epoch.category20c div.ref.category1{background-color:#B7BCD1;}.epoch-theme-dark .epoch.category20c .category1 .line{stroke:#B7BCD1;}.epoch-theme-dark .epoch.category20c .category1 .area,.epoch-theme-dark .epoch.category20c .category1 .dot{fill:#B7BCD1;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category1 path{fill:#B7BCD1;}.epoch-theme-dark .epoch.category20c .bar.category1{fill:#B7BCD1;}.epoch-theme-dark .epoch.category20c div.ref.category2{background-color:#979DAD;}.epoch-theme-dark .epoch.category20c .category2 .line{stroke:#979DAD;}.epoch-theme-dark .epoch.category20c .category2 .area,.epoch-theme-dark .epoch.category20c .category2 .dot{fill:#979DAD;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category2 path{fill:#979DAD;}.epoch-theme-dark .epoch.category20c .bar.category2{fill:#979DAD;}.epoch-theme-dark .epoch.category20c div.ref.category3{background-color:#6E717D;}.epoch-theme-dark .epoch.category20c .category3 .line{stroke:#6E717D;}.epoch-theme-dark .epoch.category20c .category3 .area,.epoch-theme-dark .epoch.category20c .category3 .dot{fill:#6E717D;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category3 path{fill:#6E717D;}.epoch-theme-dark .epoch.category20c .bar.category3{fill:#6E717D;}.epoch-theme-dark .epoch.category20c div.ref.category4{background-color:#595C66;}.epoch-theme-dark .epoch.category20c .category4 .line{stroke:#595C66;}.epoch-theme-dark .epoch.category20c .category4 .area,.epoch-theme-dark .epoch.category20c .category4 .dot{fill:#595C66;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category4 path{fill:#595C66;}.epoch-theme-dark .epoch.category20c .bar.category4{fill:#595C66;}.epoch-theme-dark .epoch.category20c div.ref.category5{background-color:#FF857F;}.epoch-theme-dark .epoch.category20c .category5 .line{stroke:#FF857F;}.epoch-theme-dark .epoch.category20c 
.category5 .area,.epoch-theme-dark .epoch.category20c .category5 .dot{fill:#FF857F;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category5 path{fill:#FF857F;}.epoch-theme-dark .epoch.category20c .bar.category5{fill:#FF857F;}.epoch-theme-dark .epoch.category20c div.ref.category6{background-color:#DE746E;}.epoch-theme-dark .epoch.category20c .category6 .line{stroke:#DE746E;}.epoch-theme-dark .epoch.category20c .category6 .area,.epoch-theme-dark .epoch.category20c .category6 .dot{fill:#DE746E;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category6 path{fill:#DE746E;}.epoch-theme-dark .epoch.category20c .bar.category6{fill:#DE746E;}.epoch-theme-dark .epoch.category20c div.ref.category7{background-color:#B55F5A;}.epoch-theme-dark .epoch.category20c .category7 .line{stroke:#B55F5A;}.epoch-theme-dark .epoch.category20c .category7 .area,.epoch-theme-dark .epoch.category20c .category7 .dot{fill:#B55F5A;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category7 path{fill:#B55F5A;}.epoch-theme-dark .epoch.category20c .bar.category7{fill:#B55F5A;}.epoch-theme-dark .epoch.category20c div.ref.category8{background-color:#964E4B;}.epoch-theme-dark .epoch.category20c .category8 .line{stroke:#964E4B;}.epoch-theme-dark .epoch.category20c .category8 .area,.epoch-theme-dark .epoch.category20c .category8 .dot{fill:#964E4B;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category8 path{fill:#964E4B;}.epoch-theme-dark .epoch.category20c .bar.category8{fill:#964E4B;}.epoch-theme-dark .epoch.category20c div.ref.category9{background-color:#F3DE88;}.epoch-theme-dark .epoch.category20c .category9 .line{stroke:#F3DE88;}.epoch-theme-dark .epoch.category20c .category9 .area,.epoch-theme-dark .epoch.category20c .category9 .dot{fill:#F3DE88;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category9 path{fill:#F3DE88;}.epoch-theme-dark .epoch.category20c .bar.category9{fill:#F3DE88;}.epoch-theme-dark .epoch.category20c div.ref.category10{background-color:#DBC87B;}.epoch-theme-dark .epoch.category20c .category10 .line{stroke:#DBC87B;}.epoch-theme-dark .epoch.category20c .category10 .area,.epoch-theme-dark .epoch.category20c .category10 .dot{fill:#DBC87B;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category10 path{fill:#DBC87B;}.epoch-theme-dark .epoch.category20c .bar.category10{fill:#DBC87B;}.epoch-theme-dark .epoch.category20c div.ref.category11{background-color:#BAAA68;}.epoch-theme-dark .epoch.category20c .category11 .line{stroke:#BAAA68;}.epoch-theme-dark .epoch.category20c .category11 .area,.epoch-theme-dark .epoch.category20c .category11 .dot{fill:#BAAA68;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category11 path{fill:#BAAA68;}.epoch-theme-dark .epoch.category20c .bar.category11{fill:#BAAA68;}.epoch-theme-dark .epoch.category20c div.ref.category12{background-color:#918551;}.epoch-theme-dark .epoch.category20c .category12 .line{stroke:#918551;}.epoch-theme-dark .epoch.category20c .category12 .area,.epoch-theme-dark .epoch.category20c .category12 .dot{fill:#918551;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category12 path{fill:#918551;}.epoch-theme-dark .epoch.category20c .bar.category12{fill:#918551;}.epoch-theme-dark .epoch.category20c div.ref.category13{background-color:#C9935E;}.epoch-theme-dark .epoch.category20c .category13 .line{stroke:#C9935E;}.epoch-theme-dark .epoch.category20c .category13 .area,.epoch-theme-dark .epoch.category20c .category13 
.dot{fill:#C9935E;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category13 path{fill:#C9935E;}.epoch-theme-dark .epoch.category20c .bar.category13{fill:#C9935E;}.epoch-theme-dark .epoch.category20c div.ref.category14{background-color:#B58455;}.epoch-theme-dark .epoch.category20c .category14 .line{stroke:#B58455;}.epoch-theme-dark .epoch.category20c .category14 .area,.epoch-theme-dark .epoch.category20c .category14 .dot{fill:#B58455;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category14 path{fill:#B58455;}.epoch-theme-dark .epoch.category20c .bar.category14{fill:#B58455;}.epoch-theme-dark .epoch.category20c div.ref.category15{background-color:#997048;}.epoch-theme-dark .epoch.category20c .category15 .line{stroke:#997048;}.epoch-theme-dark .epoch.category20c .category15 .area,.epoch-theme-dark .epoch.category20c .category15 .dot{fill:#997048;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category15 path{fill:#997048;}.epoch-theme-dark .epoch.category20c .bar.category15{fill:#997048;}.epoch-theme-dark .epoch.category20c div.ref.category16{background-color:#735436;}.epoch-theme-dark .epoch.category20c .category16 .line{stroke:#735436;}.epoch-theme-dark .epoch.category20c .category16 .area,.epoch-theme-dark .epoch.category20c .category16 .dot{fill:#735436;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category16 path{fill:#735436;}.epoch-theme-dark .epoch.category20c .bar.category16{fill:#735436;}.epoch-theme-dark .epoch.category20c div.ref.category17{background-color:#A488FF;}.epoch-theme-dark .epoch.category20c .category17 .line{stroke:#A488FF;}.epoch-theme-dark .epoch.category20c .category17 .area,.epoch-theme-dark .epoch.category20c .category17 .dot{fill:#A488FF;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category17 path{fill:#A488FF;}.epoch-theme-dark .epoch.category20c .bar.category17{fill:#A488FF;}.epoch-theme-dark .epoch.category20c div.ref.category18{background-color:#8670D1;}.epoch-theme-dark .epoch.category20c .category18 .line{stroke:#8670D1;}.epoch-theme-dark .epoch.category20c .category18 .area,.epoch-theme-dark .epoch.category20c .category18 .dot{fill:#8670D1;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category18 path{fill:#8670D1;}.epoch-theme-dark .epoch.category20c .bar.category18{fill:#8670D1;}.epoch-theme-dark .epoch.category20c div.ref.category19{background-color:#705CAD;}.epoch-theme-dark .epoch.category20c .category19 .line{stroke:#705CAD;}.epoch-theme-dark .epoch.category20c .category19 .area,.epoch-theme-dark .epoch.category20c .category19 .dot{fill:#705CAD;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category19 path{fill:#705CAD;}.epoch-theme-dark .epoch.category20c .bar.category19{fill:#705CAD;}.epoch-theme-dark .epoch.category20c div.ref.category20{background-color:#52447F;}.epoch-theme-dark .epoch.category20c .category20 .line{stroke:#52447F;}.epoch-theme-dark .epoch.category20c .category20 .area,.epoch-theme-dark .epoch.category20c .category20 .dot{fill:#52447F;stroke:rgba(0, 0, 0, 0);}.epoch-theme-dark .epoch.category20c .arc.category20 path{fill:#52447F;}.epoch-theme-dark .epoch.category20c .bar.category20{fill:#52447F;} \ No newline at end of file diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/static/epoch.min.js b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/static/epoch.min.js new file mode 100644 index 000000000..0c654b866 --- /dev/null +++ 
b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/static/epoch.min.js @@ -0,0 +1,114 @@ +(function(){var e;null==window.Epoch&&(window.Epoch={});null==(e=window.Epoch).Chart&&(e.Chart={});null==(e=window.Epoch).Time&&(e.Time={});null==(e=window.Epoch).Util&&(e.Util={});null==(e=window.Epoch).Formats&&(e.Formats={});Epoch.warn=function(g){return(console.warn||console.log)("Epoch Warning: "+g)};Epoch.exception=function(g){throw"Epoch Error: "+g;}}).call(this); +(function(){Epoch.TestContext=function(){function e(){var c,a,d;this._log=[];a=0;for(d=g.length;ac){if((c|0)!==c||d)c=c.toFixed(a);return c}f="KMGTPEZY".split("");for(h in f)if(k=f[h],b=Math.pow(10,3*((h|0)+1)),c>=b&&cc){if(0!==c%1||d)c=c.toFixed(a);return""+c+" B"}f="KB MB GB TB PB EB ZB YB".split(" ");for(h in f)if(k=f[h],b=Math.pow(1024,(h|0)+1),c>=b&&cf;k=1<=f?++a:--a)q.push(arguments[k]);return q}.apply(this,arguments);c=this._events[a];m=[];f=0;for(q=c.length;fthis.options.windowSize+1&&a.values.shift();b=[this._ticks[0],this._ticks[this._ticks.length-1]];a=b[0];b=b[1];null!=b&&b.enter&&(b.enter=!1,b.opacity=1);null!=a&&a.exit&&this._shiftTick();this.animation.frame=0;this.trigger("transition:end");if(0this.options.queueSize&&this._queue.splice(this.options.queueSize,this._queue.length-this.options.queueSize);if(this._queue.length===this.options.queueSize)return!1;this._queue.push(a.map(function(a){return function(b){return a._prepareEntry(b)}}(this)));this.trigger("push");if(!this.inTransition())return this._startTransition()}; +a.prototype._shift=function(){var a,b,c,d;this.trigger("before:shift");a=this._queue.shift();d=this.data;for(b in d)c=d[b],c.values.push(a[b]);this._updateTicks(a[0].time);this._transitionRangeAxes();return this.trigger("after:shift")};a.prototype._transitionRangeAxes=function(){this.hasAxis("left")&&this.svg.selectAll(".y.axis.left").transition().duration(500).ease("linear").call(this.leftAxis());if(this.hasAxis("right"))return this.svg.selectAll(".y.axis.right").transition().duration(500).ease("linear").call(this.rightAxis())}; +a.prototype._animate=function(){if(this.inTransition())return++this.animation.frame===this.animation.duration&&this._stopTransition(),this.draw(this.animation.frame*this.animation.delta()),this._updateTimeAxes()};a.prototype.y=function(){return d3.scale.linear().domain(this.extent(function(a){return a.y})).range([this.innerHeight(),0])};a.prototype.ySvg=function(){return d3.scale.linear().domain(this.extent(function(a){return a.y})).range([this.innerHeight()/this.pixelRatio,0])};a.prototype.w=function(){return this.innerWidth()/ +this.options.windowSize};a.prototype._updateTicks=function(a){if(this.hasAxis("top")||this.hasAxis("bottom"))if(++this._tickTimer%this.options.ticks.time||this._pushTick(this.options.windowSize,a,!0),!(0<=this._ticks[0].x-this.w()/this.pixelRatio))return this._ticks[0].exit=!0};a.prototype._pushTick=function(a,b,c,d){null==c&&(c=!1);null==d&&(d=!1);if(this.hasAxis("top")||this.hasAxis("bottom"))return b={time:b,x:a*(this.w()/this.pixelRatio)+this._offsetX(),opacity:c?0:1,enter:c?!0:!1,exit:!1},this.hasAxis("bottom")&& +(a=this.bottomAxis.append("g").attr("class","tick major").attr("transform","translate("+(b.x+1)+",0)").style("opacity",b.opacity),a.append("line").attr("y2",6),a.append("text").attr("text-anchor","middle").attr("dy",19).text(this.options.tickFormats.bottom(b.time)),b.bottomEl=a),this.hasAxis("top")&&(a=this.topAxis.append("g").attr("class","tick 
major").attr("transform","translate("+(b.x+1)+",0)").style("opacity",b.opacity),a.append("line").attr("y2",-6),a.append("text").attr("text-anchor","middle").attr("dy", +-10).text(this.options.tickFormats.top(b.time)),b.topEl=a),d?this._ticks.unshift(b):this._ticks.push(b),b};a.prototype._shiftTick=function(){var a;if(0f;b=0<=f?++c:--c)k=0,e.push(function(){var a,c,d,f;d=this.data;f=[];a=0;for(c=d.length;ag;a=0<=g?++f:--f){b=e=k=0;for(m=this.data.length;0<=m?em;b=0<=m?++e:--e)k+=this.data[b].values[a].y;k>c&&(c=k)}return[0,c]};return a}(Epoch.Time.Plot)}).call(this); +(function(){var e={}.hasOwnProperty,g=function(c,a){function d(){this.constructor=c}for(var b in a)e.call(a,b)&&(c[b]=a[b]);d.prototype=a.prototype;c.prototype=new d;c.__super__=a.prototype;return c};Epoch.Time.Area=function(c){function a(){return a.__super__.constructor.apply(this,arguments)}g(a,c);a.prototype.setStyles=function(a){a=null!=a.className?this.getStyles("g."+a.className.replace(/\s/g,".")+" path.area"):this.getStyles("g path.area");this.ctx.fillStyle=a.fill;null!=a.stroke&&(this.ctx.strokeStyle= +a.stroke);if(null!=a["stroke-width"])return this.ctx.lineWidth=a["stroke-width"].replace("px","")};a.prototype._drawAreas=function(a){var b,c,k,f,e,g,m,l,n,p;null==a&&(a=0);g=[this.y(),this.w()];m=g[0];g=g[1];p=[];for(c=l=n=this.data.length-1;0>=n?0>=l:0<=l;c=0>=n?++l:--l){f=this.data[c];this.setStyles(f);this.ctx.beginPath();e=[this.options.windowSize,f.values.length,this.inTransition()];c=e[0];k=e[1];for(e=e[2];-2<=--c&&0<=--k;)b=f.values[k],b=[(c+1)*g+a,m(b.y+b.y0)],e&&(b[0]+=g),c===this.options.windowSize- +1?this.ctx.moveTo.apply(this.ctx,b):this.ctx.lineTo.apply(this.ctx,b);c=e?(c+3)*g+a:(c+2)*g+a;this.ctx.lineTo(c,this.innerHeight());this.ctx.lineTo(this.width*this.pixelRatio+g+a,this.innerHeight());this.ctx.closePath();p.push(this.ctx.fill())}return p};a.prototype._drawStrokes=function(a){var b,c,k,f,e,g,m,l,n,p;null==a&&(a=0);c=[this.y(),this.w()];m=c[0];g=c[1];p=[];for(c=l=n=this.data.length-1;0>=n?0>=l:0<=l;c=0>=n?++l:--l){f=this.data[c];this.setStyles(f);this.ctx.beginPath();e=[this.options.windowSize, +f.values.length,this.inTransition()];c=e[0];k=e[1];for(e=e[2];-2<=--c&&0<=--k;)b=f.values[k],b=[(c+1)*g+a,m(b.y+b.y0)],e&&(b[0]+=g),c===this.options.windowSize-1?this.ctx.moveTo.apply(this.ctx,b):this.ctx.lineTo.apply(this.ctx,b);p.push(this.ctx.stroke())}return p};a.prototype.draw=function(c){null==c&&(c=0);this.clear();this._drawAreas(c);this._drawStrokes(c);return a.__super__.draw.call(this)};return a}(Epoch.Time.Stack)}).call(this); +(function(){var e={}.hasOwnProperty,g=function(c,a){function d(){this.constructor=c}for(var b in a)e.call(a,b)&&(c[b]=a[b]);d.prototype=a.prototype;c.prototype=new d;c.__super__=a.prototype;return c};Epoch.Time.Bar=function(c){function a(){return a.__super__.constructor.apply(this,arguments)}g(a,c);a.prototype._offsetX=function(){return 0.5*this.w()/this.pixelRatio};a.prototype.setStyles=function(a){a=this.getStyles("rect.bar."+a.replace(/\s/g,"."));this.ctx.fillStyle=a.fill;this.ctx.strokeStyle= +null==a.stroke||"none"===a.stroke?"transparent":a.stroke;if(null!=a["stroke-width"])return this.ctx.lineWidth=a["stroke-width"].replace("px","")};a.prototype.draw=function(c){var b,h,k,f,e,g,m,l,n,p,r,s,t;null==c&&(c=0);this.clear();f=[this.y(),this.w()];p=f[0];n=f[1];t=this.data;r=0;for(s=t.length;r=e&&0<=--g;)b=m.values[g],k=[f*n+c, 
+b.y,b.y0],b=k[0],h=k[1],k=k[2],l&&(b+=n),b=[b+1,p(h+k),n-2,this.innerHeight()-p(h)+0.5*this.pixelRatio],this.ctx.fillRect.apply(this.ctx,b),this.ctx.strokeRect.apply(this.ctx,b);return a.__super__.draw.call(this)};return a}(Epoch.Time.Stack)}).call(this); +(function(){var e={}.hasOwnProperty,g=function(c,a){function d(){this.constructor=c}for(var b in a)e.call(a,b)&&(c[b]=a[b]);d.prototype=a.prototype;c.prototype=new d;c.__super__=a.prototype;return c};Epoch.Time.Gauge=function(c){function a(c){this.options=null!=c?c:{};a.__super__.constructor.call(this,this.options=Epoch.Util.defaults(this.options,d));this.value=this.options.value||0;"absolute"!==this.el.style("position")&&"relative"!==this.el.style("position")&&this.el.style("position","relative"); +this.svg=this.el.insert("svg",":first-child").attr("width",this.width).attr("height",this.height).attr("class","gauge-labels");this.svg.style({position:"absolute","z-index":"1"});this.svg.append("g").attr("transform","translate("+this.textX()+", "+this.textY()+")").append("text").attr("class","value").text(this.options.format(this.value));this.animation={interval:null,active:!1,delta:0,target:0};this._animate=function(a){return function(){Math.abs(a.animation.target-a.value)=t;b=0<=t?++s:--s)b=l(b),b=[Math.cos(b),Math.sin(b)],c=b[0],m=b[1],b=c*(g-n)+d,r=m*(g-n)+e,c=c*(g-n-p)+d,m=m*(g-n-p)+e,this.ctx.moveTo(b,r),this.ctx.lineTo(c,m);this.ctx.stroke();this.setStyles(".epoch .gauge .arc.outer");this.ctx.beginPath();this.ctx.arc(d,e,g,-1.125* +Math.PI,0.125*Math.PI,!1);this.ctx.stroke();this.setStyles(".epoch .gauge .arc.inner");this.ctx.beginPath();this.ctx.arc(d,e,g-10,-1.125*Math.PI,0.125*Math.PI,!1);this.ctx.stroke();this.drawNeedle();return a.__super__.draw.call(this)};a.prototype.drawNeedle=function(){var a,b,c;c=[this.centerX(),this.centerY(),this.radius()];a=c[0];b=c[1];c=c[2];this.setStyles(".epoch .gauge .needle");this.ctx.beginPath();this.ctx.save();this.ctx.translate(a,b);this.ctx.rotate(this.getAngle(this.value));this.ctx.moveTo(4* +this.pixelRatio,0);this.ctx.lineTo(-4*this.pixelRatio,0);this.ctx.lineTo(-1*this.pixelRatio,19-c);this.ctx.lineTo(1,19-c);this.ctx.fill();this.setStyles(".epoch .gauge .needle-base");this.ctx.beginPath();this.ctx.arc(0,0,this.getWidth()/25,0,2*Math.PI);this.ctx.fill();return this.ctx.restore()};a.prototype.domainChanged=function(){return this.draw()};a.prototype.ticksChanged=function(){return this.draw()};a.prototype.tickSizeChanged=function(){return this.draw()};a.prototype.tickOffsetChanged=function(){return this.draw()}; +a.prototype.formatChanged=function(){return this.svg.select("text.value").text(this.options.format(this.value))};return a}(Epoch.Chart.Canvas)}).call(this); +(function(){var e={}.hasOwnProperty,g=function(c,a){function d(){this.constructor=c}for(var b in a)e.call(a,b)&&(c[b]=a[b]);d.prototype=a.prototype;c.prototype=new d;c.__super__=a.prototype;return c};Epoch.Time.Heatmap=function(c){function a(c){this.options=c;a.__super__.constructor.call(this,this.options=Epoch.Util.defaults(this.options,b));this._setOpacityFunction();this._setupPaintCanvas();this.onAll(e)}var d,b,e;g(a,c);b={buckets:10,bucketRange:[0,100],opacity:"linear",bucketPadding:2,paintZeroValues:!1, +cutOutliers:!1};d={root:function(a,b){return Math.pow(a/b,0.5)},linear:function(a,b){return a/b},quadratic:function(a,b){return Math.pow(a/b,2)},cubic:function(a,b){return Math.pow(a/b,3)},quartic:function(a,b){return Math.pow(a/b,4)},quintic:function(a,b){return 
Math.pow(a/b,5)}};e={"option:buckets":"bucketsChanged","option:bucketRange":"bucketRangeChanged","option:opacity":"opacityChanged","option:bucketPadding":"bucketPaddingChanged","option:paintZeroValues":"paintZeroValuesChanged","option:cutOutliers":"cutOutliersChanged"}; +a.prototype._setOpacityFunction=function(){if(Epoch.isString(this.options.opacity)){if(this._opacityFn=d[this.options.opacity],null==this._opacityFn)return Epoch.exception("Unknown coloring function provided '"+this.options.opacity+"'")}else return Epoch.isFunction(this.options.opacity)?this._opacityFn=this.options.opacity:Epoch.exception("Unknown type for provided coloring function.")};a.prototype.setData=function(b){var c,d,e,g;a.__super__.setData.call(this,b);e=this.data;g=[];c=0;for(d=e.length;c< +d;c++)b=e[c],g.push(b.values=b.values.map(function(a){return function(b){return a._prepareEntry(b)}}(this)));return g};a.prototype._getBuckets=function(a){var b,c,d,e,g;e=a.time;g=[];b=0;for(d=this.options.buckets;0<=d?bd;0<=d?++b:--b)g.push(0);e={time:e,max:0,buckets:g};b=(this.options.bucketRange[1]-this.options.bucketRange[0])/this.options.buckets;g=a.histogram;for(c in g)a=g[c],d=parseInt((c-this.options.bucketRange[0])/b),this.options.cutOutliers&&(0>d||d>=this.options.buckets)||(0>d?d= +0:d>=this.options.buckets&&(d=this.options.buckets-1),e.buckets[d]+=parseInt(a));c=a=0;for(b=e.buckets.length;0<=b?ab;c=0<=b?++a:--a)e.max=Math.max(e.max,e.buckets[c]);return e};a.prototype.y=function(){return d3.scale.linear().domain(this.options.bucketRange).range([this.innerHeight(),0])};a.prototype.ySvg=function(){return d3.scale.linear().domain(this.options.bucketRange).range([this.innerHeight()/this.pixelRatio,0])};a.prototype.h=function(){return this.innerHeight()/this.options.buckets}; +a.prototype._offsetX=function(){return 0.5*this.w()/this.pixelRatio};a.prototype._setupPaintCanvas=function(){this.paintWidth=(this.options.windowSize+1)*this.w();this.paintHeight=this.height*this.pixelRatio;this.paint=document.createElement("CANVAS");this.paint.width=this.paintWidth;this.paint.height=this.paintHeight;this.p=Epoch.Util.getContext(this.paint);this.redraw();this.on("after:shift","_paintEntry");this.on("transition:end","_shiftPaintCanvas");return this.on("transition:end",function(a){return function(){return a.draw(a.animation.frame* +a.animation.delta())}}(this))};a.prototype.redraw=function(){var a,b;b=this.data[0].values.length;a=this.options.windowSize;for(this.inTransition()&&a++;0<=--b&&0<=--a;)this._paintEntry(b,a);return this.draw(this.animation.frame*this.animation.delta())};a.prototype._computeColor=function(a,b,c){return Epoch.Util.toRGBA(c,this._opacityFn(a,b))};a.prototype._paintEntry=function(a,b){var c,d,e,g,h,p,r,s,t,v,y,w,A,z;null==a&&(a=null);null==b&&(b=null);g=[this.w(),this.h()];y=g[0];p=g[1];null==a&&(a=this.data[0].values.length- +1);null==b&&(b=this.options.windowSize);g=[];var x;x=[];h=0;for(v=this.options.buckets;0<=v?hv;0<=v?++h:--h)x.push(0);v=0;t=this.data;d=0;for(r=t.length;d code[class*="language-"], +pre[class*="language-"] { + background: #f5f2f0; +} + +/* Inline code */ +:not(pre) > code[class*="language-"] { + padding: .1em; + border-radius: .3em; +} + +.token.comment, +.token.prolog, +.token.doctype, +.token.cdata { + color: slategray; +} + +.token.punctuation { + color: #999; +} + +.namespace { + opacity: .7; +} + +.token.property, +.token.tag, +.token.boolean, +.token.number, +.token.constant, +.token.symbol, +.token.deleted { + color: #905; +} + +.token.selector, +.token.attr-name, 
+.token.string, +.token.char, +.token.builtin, +.token.inserted { + color: #690; +} + +.token.operator, +.token.entity, +.token.url, +.language-css .token.string, +.style .token.string { + color: #a67f59; + background: hsla(0, 0%, 100%, .5); +} + +.token.atrule, +.token.attr-value, +.token.keyword { + color: #07a; +} + +.token.function { + color: #DD4A68; +} + +.token.regex, +.token.important, +.token.variable { + color: #e90; +} + +.token.important, +.token.bold { + font-weight: bold; +} +.token.italic { + font-style: italic; +} + +.token.entity { + cursor: help; +} + diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/static/prismjs.min.js b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/static/prismjs.min.js new file mode 100644 index 000000000..a6855a787 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/static/prismjs.min.js @@ -0,0 +1,5 @@ +/* http://prismjs.com/download.html?themes=prism&languages=clike+javascript+go */ +self="undefined"!=typeof window?window:"undefined"!=typeof WorkerGlobalScope&&self instanceof WorkerGlobalScope?self:{};var Prism=function(){var e=/\blang(?:uage)?-(?!\*)(\w+)\b/i,t=self.Prism={util:{encode:function(e){return e instanceof n?new n(e.type,t.util.encode(e.content),e.alias):"Array"===t.util.type(e)?e.map(t.util.encode):e.replace(/&/g,"&").replace(/e.length)break e;if(!(d instanceof a)){u.lastIndex=0;var m=u.exec(d);if(m){c&&(f=m[1].length);var y=m.index-1+f,m=m[0].slice(f),v=m.length,k=y+v,b=d.slice(0,y+1),w=d.slice(k+1),N=[p,1];b&&N.push(b);var O=new a(l,g?t.tokenize(m,g):m,h);N.push(O),w&&N.push(w),Array.prototype.splice.apply(r,N)}}}}}return r},hooks:{all:{},add:function(e,n){var a=t.hooks.all;a[e]=a[e]||[],a[e].push(n)},run:function(e,n){var a=t.hooks.all[e];if(a&&a.length)for(var r,i=0;r=a[i++];)r(n)}}},n=t.Token=function(e,t,n){this.type=e,this.content=t,this.alias=n};if(n.stringify=function(e,a,r){if("string"==typeof e)return e;if("Array"===t.util.type(e))return e.map(function(t){return n.stringify(t,a,e)}).join("");var i={type:e.type,content:n.stringify(e.content,a,r),tag:"span",classes:["token",e.type],attributes:{},language:a,parent:r};if("comment"==i.type&&(i.attributes.spellcheck="true"),e.alias){var l="Array"===t.util.type(e.alias)?e.alias:[e.alias];Array.prototype.push.apply(i.classes,l)}t.hooks.run("wrap",i);var s="";for(var o in i.attributes)s+=o+'="'+(i.attributes[o]||"")+'"';return"<"+i.tag+' class="'+i.classes.join(" ")+'" '+s+">"+i.content+""},!self.document)return self.addEventListener?(self.addEventListener("message",function(e){var n=JSON.parse(e.data),a=n.language,r=n.code;self.postMessage(JSON.stringify(t.util.encode(t.tokenize(r,t.languages[a])))),self.close()},!1),self.Prism):self.Prism;var a=document.getElementsByTagName("script");return a=a[a.length-1],a&&(t.filename=a.src,document.addEventListener&&!a.hasAttribute("data-manual")&&document.addEventListener("DOMContentLoaded",t.highlightAll)),self.Prism}();"undefined"!=typeof module&&module.exports&&(module.exports=Prism);; 
+Prism.languages.clike={comment:[{pattern:/(^|[^\\])\/\*[\w\W]*?\*\//,lookbehind:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0}],string:/("|')(\\\n|\\?.)*?\1/,"class-name":{pattern:/((?:(?:class|interface|extends|implements|trait|instanceof|new)\s+)|(?:catch\s+\())[a-z0-9_\.\\]+/i,lookbehind:!0,inside:{punctuation:/(\.|\\)/}},keyword:/\b(if|else|while|do|for|return|in|instanceof|function|new|try|throw|catch|finally|null|break|continue)\b/,"boolean":/\b(true|false)\b/,"function":{pattern:/[a-z0-9_]+\(/i,inside:{punctuation:/\(/}},number:/\b-?(0x[\dA-Fa-f]+|\d*\.?\d+([Ee]-?\d+)?)\b/,operator:/[-+]{1,2}|!|<=?|>=?|={1,3}|&{1,2}|\|?\||\?|\*|\/|~|\^|%/,ignore:/&(lt|gt|amp);/i,punctuation:/[{}[\];(),.:]/};; +Prism.languages.javascript=Prism.languages.extend("clike",{keyword:/\b(break|case|catch|class|const|continue|debugger|default|delete|do|else|enum|export|extends|false|finally|for|function|get|if|implements|import|in|instanceof|interface|let|new|null|package|private|protected|public|return|set|static|super|switch|this|throw|true|try|typeof|var|void|while|with|yield)\b/,number:/\b-?(0x[\dA-Fa-f]+|\d*\.?\d+([Ee][+-]?\d+)?|NaN|-?Infinity)\b/,"function":/(?!\d)[a-z0-9_$]+(?=\()/i}),Prism.languages.insertBefore("javascript","keyword",{regex:{pattern:/(^|[^/])\/(?!\/)(\[.+?]|\\.|[^/\r\n])+\/[gim]{0,3}(?=\s*($|[\r\n,.;})]))/,lookbehind:!0}}),Prism.languages.markup&&Prism.languages.insertBefore("markup","tag",{script:{pattern:/[\w\W]*?<\/script>/i,inside:{tag:{pattern:/|<\/script>/i,inside:Prism.languages.markup.tag.inside},rest:Prism.languages.javascript},alias:"language-javascript"}});; +Prism.languages.go=Prism.languages.extend("clike",{keyword:/\b(break|case|chan|const|continue|default|defer|else|fallthrough|for|func|go(to)?|if|import|interface|map|package|range|return|select|struct|switch|type|var)\b/,builtin:/\b(bool|byte|complex(64|128)|error|float(32|64)|rune|string|u?int(8|16|32|64|)|uintptr|append|cap|close|complex|copy|delete|imag|len|make|new|panic|print(ln)?|real|recover)\b/,"boolean":/\b(_|iota|nil|true|false)\b/,operator:/([(){}\[\]]|[*\/%^!]=?|\+[=+]?|-[>=-]?|\|[=|]?|>[=>]?|<(<|[=-])?|==?|&(&|=|^=?)?|\.(\.\.)?|[,;]|:=?)/,number:/\b(-?(0x[a-f\d]+|(\d+\.?\d*|\.\d+)(e[-+]?\d+)?)i?)\b/i,string:/("|'|`)(\\?.|\r|\n)*?\1/}),delete Prism.languages.go["class-name"];; diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/static/realtime.js b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/static/realtime.js new file mode 100644 index 000000000..919dae26c --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/resources/static/realtime.js @@ -0,0 +1,144 @@ + + +function StartRealtime(roomid, timestamp) { + StartEpoch(timestamp); + StartSSE(roomid); + StartForm(); +} + +function StartForm() { + $('#chat-message').focus(); + $('#chat-form').ajaxForm(function() { + $('#chat-message').val(''); + $('#chat-message').focus(); + }); +} + +function StartEpoch(timestamp) { + var windowSize = 60; + var height = 200; + var defaultData = histogram(windowSize, timestamp); + + window.heapChart = $('#heapChart').epoch({ + type: 'time.area', + axes: ['bottom', 'left'], + height: height, + historySize: 10, + data: [ + {values: defaultData}, + {values: defaultData} + ] + }); + + window.mallocsChart = $('#mallocsChart').epoch({ + type: 'time.area', + axes: ['bottom', 'left'], + height: height, + historySize: 10, + data: [ + {values: defaultData}, + {values: defaultData} + ] + }); + + window.messagesChart = $('#messagesChart').epoch({ + type: 
'time.line', + axes: ['bottom', 'left'], + height: 240, + historySize: 10, + data: [ + {values: defaultData}, + {values: defaultData}, + {values: defaultData} + ] + }); +} + +function StartSSE(roomid) { + if (!window.EventSource) { + alert("EventSource is not enabled in this browser"); + return; + } + var source = new EventSource('/stream/'+roomid); + source.addEventListener('message', newChatMessage, false); + source.addEventListener('stats', stats, false); +} + +function stats(e) { + var data = parseJSONStats(e.data); + heapChart.push(data.heap); + mallocsChart.push(data.mallocs); + messagesChart.push(data.messages); +} + +function parseJSONStats(e) { + var data = jQuery.parseJSON(e); + var timestamp = data.timestamp; + + var heap = [ + {time: timestamp, y: data.HeapInuse}, + {time: timestamp, y: data.StackInuse} + ]; + + var mallocs = [ + {time: timestamp, y: data.Mallocs}, + {time: timestamp, y: data.Frees} + ]; + var messages = [ + {time: timestamp, y: data.Connected}, + {time: timestamp, y: data.Inbound}, + {time: timestamp, y: data.Outbound} + ]; + + return { + heap: heap, + mallocs: mallocs, + messages: messages + } +} + +function newChatMessage(e) { + var data = jQuery.parseJSON(e.data); + var nick = data.nick; + var message = data.message; + var style = rowStyle(nick); + var html = ""+nick+""+message+""; + $('#chat').append(html); + + $("#chat-scroll").scrollTop($("#chat-scroll")[0].scrollHeight); +} + +function histogram(windowSize, timestamp) { + var entries = new Array(windowSize); + for(var i = 0; i < windowSize; i++) { + entries[i] = {time: (timestamp-windowSize+i-1), y:0}; + } + return entries; +} + +var entityMap = { + "&": "&", + "<": "<", + ">": ">", + '"': '"', + "'": ''', + "/": '/' +}; + +function rowStyle(nick) { + var classes = ['active', 'success', 'info', 'warning', 'danger']; + var index = hashCode(nick)%5; + return classes[index]; +} + +function hashCode(s){ + return Math.abs(s.split("").reduce(function(a,b){a=((a<<5)-a)+b.charCodeAt(0);return a&a},0)); +} + +function escapeHtml(string) { + return String(string).replace(/[&<>"'\/]/g, function (s) { + return entityMap[s]; + }); +} + +window.StartRealtime = StartRealtime diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/rooms.go b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/rooms.go new file mode 100644 index 000000000..82396ba37 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/rooms.go @@ -0,0 +1,25 @@ +package main + +import "github.com/dustin/go-broadcast" + +var roomChannels = make(map[string]broadcast.Broadcaster) + +func openListener(roomid string) chan interface{} { + listener := make(chan interface{}) + room(roomid).Register(listener) + return listener +} + +func closeListener(roomid string, listener chan interface{}) { + room(roomid).Unregister(listener) + close(listener) +} + +func room(roomid string) broadcast.Broadcaster { + b, ok := roomChannels[roomid] + if !ok { + b = broadcast.NewBroadcaster(10) + roomChannels[roomid] = b + } + return b +} diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/routes.go b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/routes.go new file mode 100644 index 000000000..86da9bea2 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/routes.go @@ -0,0 +1,95 @@ +package main + +import ( + "fmt" + "html" + "io" + "strings" + "time" + + "github.com/gin-gonic/gin" +) + +func rateLimit(c *gin.Context) { + ip := c.ClientIP() + value := int(ips.Add(ip, 1)) + 
if value%50 == 0 { + fmt.Printf("ip: %s, count: %d\n", ip, value) + } + if value >= 200 { + if value%200 == 0 { + fmt.Println("ip blocked") + } + c.Abort() + c.String(503, "you were automatically banned :)") + } +} + +func index(c *gin.Context) { + c.Redirect(301, "/room/hn") +} + +func roomGET(c *gin.Context) { + roomid := c.Param("roomid") + nick := c.Query("nick") + if len(nick) < 2 { + nick = "" + } + if len(nick) > 13 { + nick = nick[0:12] + "..." + } + c.HTML(200, "room_login.templ.html", gin.H{ + "roomid": roomid, + "nick": nick, + "timestamp": time.Now().Unix(), + }) + +} + +func roomPOST(c *gin.Context) { + roomid := c.Param("roomid") + nick := c.Query("nick") + message := c.PostForm("message") + message = strings.TrimSpace(message) + + validMessage := len(message) > 1 && len(message) < 200 + validNick := len(nick) > 1 && len(nick) < 14 + if !validMessage || !validNick { + c.JSON(400, gin.H{ + "status": "failed", + "error": "the message or nickname is too long", + }) + return + } + + post := gin.H{ + "nick": html.EscapeString(nick), + "message": html.EscapeString(message), + } + messages.Add("inbound", 1) + room(roomid).Submit(post) + c.JSON(200, post) +} + +func streamRoom(c *gin.Context) { + roomid := c.Param("roomid") + listener := openListener(roomid) + ticker := time.NewTicker(1 * time.Second) + users.Add("connected", 1) + defer func() { + closeListener(roomid, listener) + ticker.Stop() + users.Add("disconnected", 1) + }() + + c.Stream(func(w io.Writer) bool { + select { + case msg := <-listener: + messages.Add("outbound", 1) + c.SSEvent("message", msg) + case <-ticker.C: + c.SSEvent("stats", Stats()) + } + return true + }) +} diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/stats.go b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/stats.go new file mode 100644 index 000000000..4afedcb5d --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-advanced/stats.go @@ -0,0 +1,58 @@ +package main + +import ( + "runtime" + "sync" + "time" + + "github.com/manucorporat/stats" +) + +var ( + ips = stats.New() + messages = stats.New() + users = stats.New() + mutexStats sync.RWMutex + savedStats map[string]uint64 +) + +func statsWorker() { + c := time.Tick(1 * time.Second) + var lastMallocs uint64 + var lastFrees uint64 + for range c { + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + + mutexStats.Lock() + savedStats = map[string]uint64{ + "timestamp": uint64(time.Now().Unix()), + "HeapInuse": stats.HeapInuse, + "StackInuse": stats.StackInuse, + "Mallocs": stats.Mallocs - lastMallocs, + "Frees": stats.Frees - lastFrees, + "Inbound": uint64(messages.Get("inbound")), + "Outbound": uint64(messages.Get("outbound")), + "Connected": connectedUsers(), + } + lastMallocs = stats.Mallocs + lastFrees = stats.Frees + messages.Reset() + mutexStats.Unlock() + } +} + +func connectedUsers() uint64 { + connected := users.Get("connected") - users.Get("disconnected") + if connected < 0 { + return 0 + } + return uint64(connected) +} + +func Stats() map[string]uint64 { + mutexStats.RLock() + defer mutexStats.RUnlock() + + return savedStats +} diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-chat/Makefile b/vendor/github.com/gin-gonic/gin/examples/realtime-chat/Makefile new file mode 100644 index 000000000..dea583df1 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-chat/Makefile @@ -0,0 +1,9 @@ +all: deps build + +.PHONY: deps +deps: + go get -d -v github.com/dustin/go-broadcast/... 
+ +.PHONY: build +build: deps + go build -o realtime-chat main.go rooms.go template.go diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-chat/main.go b/vendor/github.com/gin-gonic/gin/examples/realtime-chat/main.go new file mode 100644 index 000000000..e4b55a0f0 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-chat/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "io" + "math/rand" + + "github.com/gin-gonic/gin" +) + +func main() { + router := gin.Default() + router.SetHTMLTemplate(html) + + router.GET("/room/:roomid", roomGET) + router.POST("/room/:roomid", roomPOST) + router.DELETE("/room/:roomid", roomDELETE) + router.GET("/stream/:roomid", stream) + + router.Run(":8080") +} + +func stream(c *gin.Context) { + roomid := c.Param("roomid") + listener := openListener(roomid) + defer closeListener(roomid, listener) + + c.Stream(func(w io.Writer) bool { + c.SSEvent("message", <-listener) + return true + }) +} + +func roomGET(c *gin.Context) { + roomid := c.Param("roomid") + userid := fmt.Sprint(rand.Int31()) + c.HTML(200, "chat_room", gin.H{ + "roomid": roomid, + "userid": userid, + }) +} + +func roomPOST(c *gin.Context) { + roomid := c.Param("roomid") + userid := c.PostForm("user") + message := c.PostForm("message") + room(roomid).Submit(userid + ": " + message) + + c.JSON(200, gin.H{ + "status": "success", + "message": message, + }) +} + +func roomDELETE(c *gin.Context) { + roomid := c.Param("roomid") + deleteBroadcast(roomid) +} diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-chat/rooms.go b/vendor/github.com/gin-gonic/gin/examples/realtime-chat/rooms.go new file mode 100644 index 000000000..8c62bece1 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-chat/rooms.go @@ -0,0 +1,33 @@ +package main + +import "github.com/dustin/go-broadcast" + +var roomChannels = make(map[string]broadcast.Broadcaster) + +func openListener(roomid string) chan interface{} { + listener := make(chan interface{}) + room(roomid).Register(listener) + return listener +} + +func closeListener(roomid string, listener chan interface{}) { + room(roomid).Unregister(listener) + close(listener) +} + +func deleteBroadcast(roomid string) { + b, ok := roomChannels[roomid] + if ok { + b.Close() + delete(roomChannels, roomid) + } +} + +func room(roomid string) broadcast.Broadcaster { + b, ok := roomChannels[roomid] + if !ok { + b = broadcast.NewBroadcaster(10) + roomChannels[roomid] = b + } + return b +} diff --git a/vendor/github.com/gin-gonic/gin/examples/realtime-chat/template.go b/vendor/github.com/gin-gonic/gin/examples/realtime-chat/template.go new file mode 100644 index 000000000..b9024de6d --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/realtime-chat/template.go @@ -0,0 +1,44 @@ +package main + +import "html/template" + +var html = template.Must(template.New("chat_room").Parse(` + + + {{.roomid}} + + + + + + +

+    <h1>Welcome to {{.roomid}} room</h1>
+    <div id="messages"></div>
+    <form id="myForm" action="/room/{{.roomid}}" method="post">
+    User: <input name="user" value="{{.userid}}">
+    Message: <input name="message">
+    <input type="submit" value="Submit">
+    </form>
+ + +`)) diff --git a/vendor/github.com/gin-gonic/gin/examples/template/main.go b/vendor/github.com/gin-gonic/gin/examples/template/main.go new file mode 100644 index 000000000..f9e611dff --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/template/main.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "html/template" + "net/http" + "time" + + "github.com/gin-gonic/gin" +) + +func formatAsDate(t time.Time) string { + year, month, day := t.Date() + return fmt.Sprintf("%d%02d/%02d", year, month, day) +} + +func main() { + router := gin.Default() + router.Delims("{[{", "}]}") + router.SetFuncMap(template.FuncMap{ + "formatAsDate": formatAsDate, + }) + router.LoadHTMLFiles("../../fixtures/basic/raw.tmpl") + + router.GET("/raw", func(c *gin.Context) { + c.HTML(http.StatusOK, "raw.tmpl", map[string]interface{}{ + "now": time.Date(2017, 07, 01, 0, 0, 0, 0, time.UTC), + }) + }) + + router.Run(":8080") +} diff --git a/vendor/github.com/gin-gonic/gin/examples/upload-file/multiple/main.go b/vendor/github.com/gin-gonic/gin/examples/upload-file/multiple/main.go new file mode 100644 index 000000000..a55325ed5 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/upload-file/multiple/main.go @@ -0,0 +1,37 @@ +package main + +import ( + "fmt" + "net/http" + + "github.com/gin-gonic/gin" +) + +func main() { + router := gin.Default() + // Set a lower memory limit for multipart forms (default is 32 MiB) + router.MaxMultipartMemory = 8 << 20 // 8 MiB + router.Static("/", "./public") + router.POST("/upload", func(c *gin.Context) { + name := c.PostForm("name") + email := c.PostForm("email") + + // Multipart form + form, err := c.MultipartForm() + if err != nil { + c.String(http.StatusBadRequest, fmt.Sprintf("get form err: %s", err.Error())) + return + } + files := form.File["files"] + + for _, file := range files { + if err := c.SaveUploadedFile(file, file.Filename); err != nil { + c.String(http.StatusBadRequest, fmt.Sprintf("upload file err: %s", err.Error())) + return + } + } + + c.String(http.StatusOK, fmt.Sprintf("Uploaded successfully %d files with fields name=%s and email=%s.", len(files), name, email)) + }) + router.Run(":8080") +} diff --git a/vendor/github.com/gin-gonic/gin/examples/upload-file/multiple/public/index.html b/vendor/github.com/gin-gonic/gin/examples/upload-file/multiple/public/index.html new file mode 100644 index 000000000..b8463601a --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/upload-file/multiple/public/index.html @@ -0,0 +1,17 @@ + + + + + Multiple file upload + + +

+<h1>Upload multiple files with fields</h1>
+
+<form action="/upload" method="post" enctype="multipart/form-data">
+    Name: <input type="text" name="name"><br>
+    Email: <input type="email" name="email"><br>
+    Files: <input type="file" name="files" multiple><br><br>
+    <input type="submit" value="Submit">
+</form>
+ + diff --git a/vendor/github.com/gin-gonic/gin/examples/upload-file/single/main.go b/vendor/github.com/gin-gonic/gin/examples/upload-file/single/main.go new file mode 100644 index 000000000..5d438651a --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/upload-file/single/main.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + "net/http" + + "github.com/gin-gonic/gin" +) + +func main() { + router := gin.Default() + // Set a lower memory limit for multipart forms (default is 32 MiB) + router.MaxMultipartMemory = 8 << 20 // 8 MiB + router.Static("/", "./public") + router.POST("/upload", func(c *gin.Context) { + name := c.PostForm("name") + email := c.PostForm("email") + + // Source + file, err := c.FormFile("file") + if err != nil { + c.String(http.StatusBadRequest, fmt.Sprintf("get form err: %s", err.Error())) + return + } + + if err := c.SaveUploadedFile(file, file.Filename); err != nil { + c.String(http.StatusBadRequest, fmt.Sprintf("upload file err: %s", err.Error())) + return + } + + c.String(http.StatusOK, fmt.Sprintf("File %s uploaded successfully with fields name=%s and email=%s.", file.Filename, name, email)) + }) + router.Run(":8080") +} diff --git a/vendor/github.com/gin-gonic/gin/examples/upload-file/single/public/index.html b/vendor/github.com/gin-gonic/gin/examples/upload-file/single/public/index.html new file mode 100644 index 000000000..b0c2a8084 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/examples/upload-file/single/public/index.html @@ -0,0 +1,16 @@ + + + + + Single file upload + + +

+<h1>Upload single file with fields</h1>
+
+<form action="/upload" method="post" enctype="multipart/form-data">
+    Name: <input type="text" name="name"><br>
+    Email: <input type="email" name="email"><br>
+    Files: <input type="file" name="file"><br><br>
+    <input type="submit" value="Submit">
+</form>
+ diff --git a/vendor/github.com/gin-gonic/gin/fixtures/basic/hello.tmpl b/vendor/github.com/gin-gonic/gin/fixtures/basic/hello.tmpl new file mode 100644 index 000000000..0767ef3f2 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/fixtures/basic/hello.tmpl @@ -0,0 +1 @@ +

<h1>Hello {[{.name}]}</h1>
\ No newline at end of file diff --git a/vendor/github.com/gin-gonic/gin/fixtures/basic/raw.tmpl b/vendor/github.com/gin-gonic/gin/fixtures/basic/raw.tmpl new file mode 100644 index 000000000..8bc757030 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/fixtures/basic/raw.tmpl @@ -0,0 +1 @@ +Date: {[{.now | formatAsDate}]} diff --git a/vendor/github.com/gin-gonic/gin/fixtures/testdata/cert.pem b/vendor/github.com/gin-gonic/gin/fixtures/testdata/cert.pem new file mode 100644 index 000000000..c1d3d6328 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/fixtures/testdata/cert.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC9DCCAdygAwIBAgIQUNSK+OxWHYYFxHVJV0IlpDANBgkqhkiG9w0BAQsFADAS +MRAwDgYDVQQKEwdBY21lIENvMB4XDTE3MTExNjEyMDA0N1oXDTE4MTExNjEyMDA0 +N1owEjEQMA4GA1UEChMHQWNtZSBDbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKmyj/YZpD59Bpy4w3qf6VzMw9uUBsWp+IP4kl7z5cmGHYUHn/YopTLH +vR23GAB12p6Km5QWzCBuJF4j61PJXHfg3/rjShZ77JcQ3kzxuy1iKDI+DNKN7Klz +rdjJ49QD0lczZHeBvvCL7JsJFKFjGy62rnStuW8LaIEdtjXT+GUZTxJh6G7yPYfD +MS1IsdMQGOdbGwNa+qogMuQPh0TzHw+C73myKrjY6pREijknMC/rnIMz9dLPt6Kl +xXy4br443dpY6dYGIhDuKhROT+vZ05HKasuuQUFhY7v/KoUpEZMB9rfUSzjQ5fak +eDUAMniXRcd+DmwvboG2TI6ixmuPK+ECAwEAAaNGMEQwDgYDVR0PAQH/BAQDAgWg +MBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwDwYDVR0RBAgwBocE +fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAMXOLvj7BFsxdbcfRPBd0OFrH/8lI7vPV +LRcJ6r5iv0cnNvZXXbIOQLbg4clJAWjoE08nRm1KvNXhKdns0ELEV86YN2S6jThq +rIGrBqKtaJLB3M9BtDSiQ6SGPLYrWvmhj3Avi8PbSGy51bpGbqopd16j6LYU7Cp2 +TefMRlOAFtHojpCVon1CMpqcNxS0WNlQ3lUBSrw3HB0o12x++roja2ibF54tSHXB +KUuadoEzN+mMBwenEBychmAGzdiG4GQHRmhigh85+mtW6UMGiqyCZHs0EgE9FCLL +sRrsTI/VOzLz6lluygXkOsXrP+PP0SvmE3eylWjj9e2nj/u/Cy2YKg== +-----END CERTIFICATE----- diff --git a/vendor/github.com/gin-gonic/gin/fixtures/testdata/key.pem b/vendor/github.com/gin-gonic/gin/fixtures/testdata/key.pem new file mode 100644 index 000000000..c2a0181fe --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/fixtures/testdata/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAqbKP9hmkPn0GnLjDep/pXMzD25QGxan4g/iSXvPlyYYdhQef +9iilMse9HbcYAHXanoqblBbMIG4kXiPrU8lcd+Df+uNKFnvslxDeTPG7LWIoMj4M +0o3sqXOt2Mnj1APSVzNkd4G+8IvsmwkUoWMbLraudK25bwtogR22NdP4ZRlPEmHo +bvI9h8MxLUix0xAY51sbA1r6qiAy5A+HRPMfD4LvebIquNjqlESKOScwL+ucgzP1 +0s+3oqXFfLhuvjjd2ljp1gYiEO4qFE5P69nTkcpqy65BQWFju/8qhSkRkwH2t9RL +ONDl9qR4NQAyeJdFx34ObC9ugbZMjqLGa48r4QIDAQABAoIBAD5mhd+GMEo2KU9J +9b/Ku8I/HapJtW/L/7Fvn0tBPncrVQGM+zpGWfDhV95sbGwG6lwwNeNvuqIWPlNL +vAY0XkdKrrIQEDdSXH50WnpKzXxzwrou7QIj5Cmvevbjzl4xBZDBOilj0XWczmV4 +IljyG5XC4UXQeAaoWEZaSZ1jk8yAt2Zq1Hgg7HqhHsK/arWXBgax+4K5nV/s9gZx +yjKU9mXTIs7k/aNnZqwQKqcZF+l3mvbZttOaFwsP14H0I8OFWhnM9hie54Dejqxi +f4/llNxDqUs6lqJfP3qNxtORLcFe75M+Yl8v7g2hkjtLdZBakPzSTEx3TAK/UHgi +aM8DdxECgYEA3fmg/PI4EgUEj0C3SCmQXR/CnQLMUQgb54s0asp4akvp+M7YCcr1 +pQd3HFUpBwhBcJg5LeSe87vLupY7pHCKk56cl9WY6hse0b9sP/7DWJuGiO62m0E0 +vNjQ2jpG99oR2ROIHHeWsGCpGLmrRT/kY+vR3M+AOLZniXlOCw8k0aUCgYEAw7WL +XFWLxgZYQYilywqrQmfv1MBfaUCvykO6oWB+f6mmnihSFjecI+nDw/b3yXVYGEgy +0ebkuw0jP8suC8wBqX9WuXj+9nZNomJRssJyOMiEhDEqUiTztFPSp9pdruoakLTh +Wk1p9NralOqGPUmxpXlFKVmYRTUbluikVxDypI0CgYBn6sqEQH0hann0+o4TWWn9 +PrYkPUAbm1k8771tVTZERR/W3Dbldr/DL5iCihe39BR2urziEEqdvkglJNntJMar +TzDuIBADYQjvltb9qq4XGFBGYMLaMg+XbUVxNKEuvUdnwa4R7aZ9EfN34MwekkfA +w5Cu9/GGG1ajVEfGA6PwBQKBgA3o71jGs8KFXOx7e90sivOTU5Z5fc6LTHNB0Rf7 +NcJ5GmCPWRY/KZfb25AoE4B8GKDRMNt+X69zxZeZJ1KrU0rqxA02rlhyHB54gnoE +G/4xMkn6/JkOC0w70PMhMBtohC7YzFOQwQEoNPT0nkno3Pl33xSLS6lPlwBo1JVj +nPtZAoGACXNLXYkR5vexE+w6FGl59r4RQhu1XU8Mr5DIHeB7kXPN3RKbS201M+Tb 
+SB5jbu0iDV477XkzSNmhaksFf2wM9MT6CaE+8n3UU5tMa+MmBGgwYTp/i9HkqVh5 +jjpJifn1VWBINd4cpNzwCg9LXoo0tbtUPWwGzqVeyo/YE5GIHGo= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/gin-gonic/gin/fs.go b/vendor/github.com/gin-gonic/gin/fs.go new file mode 100644 index 000000000..7a6738a68 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/fs.go @@ -0,0 +1,45 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "net/http" + "os" +) + +type onlyfilesFS struct { + fs http.FileSystem +} + +type neuteredReaddirFile struct { + http.File +} + +// Dir returns a http.Filesystem that can be used by http.FileServer(). It is used internally +// in router.Static(). +// if listDirectory == true, then it works the same as http.Dir() otherwise it returns +// a filesystem that prevents http.FileServer() to list the directory files. +func Dir(root string, listDirectory bool) http.FileSystem { + fs := http.Dir(root) + if listDirectory { + return fs + } + return &onlyfilesFS{fs} +} + +// Open conforms to http.Filesystem. +func (fs onlyfilesFS) Open(name string) (http.File, error) { + f, err := fs.fs.Open(name) + if err != nil { + return nil, err + } + return neuteredReaddirFile{f}, nil +} + +// Readdir overrides the http.File default implementation. +func (f neuteredReaddirFile) Readdir(count int) ([]os.FileInfo, error) { + // this disables directory listing + return nil, nil +} diff --git a/vendor/github.com/gin-gonic/gin/gin.go b/vendor/github.com/gin-gonic/gin/gin.go new file mode 100644 index 000000000..4205eff06 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/gin.go @@ -0,0 +1,444 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "html/template" + "net" + "net/http" + "os" + "sync" + + "github.com/gin-gonic/gin/render" +) + +const ( + // Version is Framework's version. + Version = "v1.2" + defaultMultipartMemory = 32 << 20 // 32 MB +) + +var ( + default404Body = []byte("404 page not found") + default405Body = []byte("405 method not allowed") + defaultAppEngine bool +) + +type HandlerFunc func(*Context) +type HandlersChain []HandlerFunc + +// Last returns the last handler in the chain. ie. the last handler is the main own. +func (c HandlersChain) Last() HandlerFunc { + if length := len(c); length > 0 { + return c[length-1] + } + return nil +} + +type RouteInfo struct { + Method string + Path string + Handler string +} + +type RoutesInfo []RouteInfo + +// Engine is the framework's instance, it contains the muxer, middleware and configuration settings. +// Create an instance of Engine, by using New() or Default() +type Engine struct { + RouterGroup + + // Enables automatic redirection if the current route can't be matched but a + // handler for the path with (without) the trailing slash exists. + // For example if /foo/ is requested but a route only exists for /foo, the + // client is redirected to /foo with http status code 301 for GET requests + // and 307 for all other request methods. + RedirectTrailingSlash bool + + // If enabled, the router tries to fix the current request path, if no + // handle is registered for it. + // First superfluous path elements like ../ or // are removed. + // Afterwards the router does a case-insensitive lookup of the cleaned path. 
+ // If a handle can be found for this route, the router makes a redirection + // to the corrected path with status code 301 for GET requests and 307 for + // all other request methods. + // For example /FOO and /..//Foo could be redirected to /foo. + // RedirectTrailingSlash is independent of this option. + RedirectFixedPath bool + + // If enabled, the router checks if another method is allowed for the + // current route, if the current request can not be routed. + // If this is the case, the request is answered with 'Method Not Allowed' + // and HTTP status code 405. + // If no other Method is allowed, the request is delegated to the NotFound + // handler. + HandleMethodNotAllowed bool + ForwardedByClientIP bool + + // #726 #755 If enabled, it will thrust some headers starting with + // 'X-AppEngine...' for better integration with that PaaS. + AppEngine bool + + // If enabled, the url.RawPath will be used to find parameters. + UseRawPath bool + + // If true, the path value will be unescaped. + // If UseRawPath is false (by default), the UnescapePathValues effectively is true, + // as url.Path gonna be used, which is already unescaped. + UnescapePathValues bool + + // Value of 'maxMemory' param that is given to http.Request's ParseMultipartForm + // method call. + MaxMultipartMemory int64 + + delims render.Delims + secureJsonPrefix string + HTMLRender render.HTMLRender + FuncMap template.FuncMap + allNoRoute HandlersChain + allNoMethod HandlersChain + noRoute HandlersChain + noMethod HandlersChain + pool sync.Pool + trees methodTrees +} + +var _ IRouter = &Engine{} + +// New returns a new blank Engine instance without any middleware attached. +// By default the configuration is: +// - RedirectTrailingSlash: true +// - RedirectFixedPath: false +// - HandleMethodNotAllowed: false +// - ForwardedByClientIP: true +// - UseRawPath: false +// - UnescapePathValues: true +func New() *Engine { + debugPrintWARNINGNew() + engine := &Engine{ + RouterGroup: RouterGroup{ + Handlers: nil, + basePath: "/", + root: true, + }, + FuncMap: template.FuncMap{}, + RedirectTrailingSlash: true, + RedirectFixedPath: false, + HandleMethodNotAllowed: false, + ForwardedByClientIP: true, + AppEngine: defaultAppEngine, + UseRawPath: false, + UnescapePathValues: true, + MaxMultipartMemory: defaultMultipartMemory, + trees: make(methodTrees, 0, 9), + delims: render.Delims{Left: "{{", Right: "}}"}, + secureJsonPrefix: "while(1);", + } + engine.RouterGroup.engine = engine + engine.pool.New = func() interface{} { + return engine.allocateContext() + } + return engine +} + +// Default returns an Engine instance with the Logger and Recovery middleware already attached. +func Default() *Engine { + debugPrintWARNINGDefault() + engine := New() + engine.Use(Logger(), Recovery()) + return engine +} + +func (engine *Engine) allocateContext() *Context { + return &Context{engine: engine} +} + +func (engine *Engine) Delims(left, right string) *Engine { + engine.delims = render.Delims{Left: left, Right: right} + return engine +} + +// SecureJsonPrefix sets the secureJsonPrefix used in Context.SecureJSON. +func (engine *Engine) SecureJsonPrefix(prefix string) *Engine { + engine.secureJsonPrefix = prefix + return engine +} + +// LoadHTMLGlob loads HTML files identified by glob pattern +// and associates the result with HTML renderer. 
+func (engine *Engine) LoadHTMLGlob(pattern string) { + left := engine.delims.Left + right := engine.delims.Right + + if IsDebugging() { + debugPrintLoadTemplate(template.Must(template.New("").Delims(left, right).Funcs(engine.FuncMap).ParseGlob(pattern))) + engine.HTMLRender = render.HTMLDebug{Glob: pattern, FuncMap: engine.FuncMap, Delims: engine.delims} + return + } + + templ := template.Must(template.New("").Delims(left, right).Funcs(engine.FuncMap).ParseGlob(pattern)) + engine.SetHTMLTemplate(templ) +} + +// LoadHTMLFiles loads a slice of HTML files +// and associates the result with HTML renderer. +func (engine *Engine) LoadHTMLFiles(files ...string) { + if IsDebugging() { + engine.HTMLRender = render.HTMLDebug{Files: files, FuncMap: engine.FuncMap, Delims: engine.delims} + return + } + + templ := template.Must(template.New("").Delims(engine.delims.Left, engine.delims.Right).Funcs(engine.FuncMap).ParseFiles(files...)) + engine.SetHTMLTemplate(templ) +} + +// SetHTMLTemplate associate a template with HTML renderer. +func (engine *Engine) SetHTMLTemplate(templ *template.Template) { + if len(engine.trees) > 0 { + debugPrintWARNINGSetHTMLTemplate() + } + + engine.HTMLRender = render.HTMLProduction{Template: templ.Funcs(engine.FuncMap)} +} + +// SetFuncMap sets the FuncMap used for template.FuncMap. +func (engine *Engine) SetFuncMap(funcMap template.FuncMap) { + engine.FuncMap = funcMap +} + +// NoRoute adds handlers for NoRoute. It return a 404 code by default. +func (engine *Engine) NoRoute(handlers ...HandlerFunc) { + engine.noRoute = handlers + engine.rebuild404Handlers() +} + +// NoMethod sets the handlers called when... TODO. +func (engine *Engine) NoMethod(handlers ...HandlerFunc) { + engine.noMethod = handlers + engine.rebuild405Handlers() +} + +// Use attachs a global middleware to the router. ie. the middleware attached though Use() will be +// included in the handlers chain for every single request. Even 404, 405, static files... +// For example, this is the right place for a logger or error management middleware. +func (engine *Engine) Use(middleware ...HandlerFunc) IRoutes { + engine.RouterGroup.Use(middleware...) + engine.rebuild404Handlers() + engine.rebuild405Handlers() + return engine +} + +func (engine *Engine) rebuild404Handlers() { + engine.allNoRoute = engine.combineHandlers(engine.noRoute) +} + +func (engine *Engine) rebuild405Handlers() { + engine.allNoMethod = engine.combineHandlers(engine.noMethod) +} + +func (engine *Engine) addRoute(method, path string, handlers HandlersChain) { + assert1(path[0] == '/', "path must begin with '/'") + assert1(method != "", "HTTP method can not be empty") + assert1(len(handlers) > 0, "there must be at least one handler") + + debugPrintRoute(method, path, handlers) + root := engine.trees.get(method) + if root == nil { + root = new(node) + engine.trees = append(engine.trees, methodTree{method: method, root: root}) + } + root.addRoute(path, handlers) +} + +// Routes returns a slice of registered routes, including some useful information, such as: +// the http method, path and the handler name. 
+func (engine *Engine) Routes() (routes RoutesInfo) { + for _, tree := range engine.trees { + routes = iterate("", tree.method, routes, tree.root) + } + return routes +} + +func iterate(path, method string, routes RoutesInfo, root *node) RoutesInfo { + path += root.path + if len(root.handlers) > 0 { + routes = append(routes, RouteInfo{ + Method: method, + Path: path, + Handler: nameOfFunction(root.handlers.Last()), + }) + } + for _, child := range root.children { + routes = iterate(path, method, routes, child) + } + return routes +} + +// Run attaches the router to a http.Server and starts listening and serving HTTP requests. +// It is a shortcut for http.ListenAndServe(addr, router) +// Note: this method will block the calling goroutine indefinitely unless an error happens. +func (engine *Engine) Run(addr ...string) (err error) { + defer func() { debugPrintError(err) }() + + address := resolveAddress(addr) + debugPrint("Listening and serving HTTP on %s\n", address) + err = http.ListenAndServe(address, engine) + return +} + +// RunTLS attaches the router to a http.Server and starts listening and serving HTTPS (secure) requests. +// It is a shortcut for http.ListenAndServeTLS(addr, certFile, keyFile, router) +// Note: this method will block the calling goroutine indefinitely unless an error happens. +func (engine *Engine) RunTLS(addr, certFile, keyFile string) (err error) { + debugPrint("Listening and serving HTTPS on %s\n", addr) + defer func() { debugPrintError(err) }() + + err = http.ListenAndServeTLS(addr, certFile, keyFile, engine) + return +} + +// RunUnix attaches the router to a http.Server and starts listening and serving HTTP requests +// through the specified unix socket (ie. a file). +// Note: this method will block the calling goroutine indefinitely unless an error happens. +func (engine *Engine) RunUnix(file string) (err error) { + debugPrint("Listening and serving HTTP on unix:/%s", file) + defer func() { debugPrintError(err) }() + + os.Remove(file) + listener, err := net.Listen("unix", file) + if err != nil { + return + } + defer listener.Close() + err = http.Serve(listener, engine) + return +} + +// ServeHTTP conforms to the http.Handler interface. +func (engine *Engine) ServeHTTP(w http.ResponseWriter, req *http.Request) { + c := engine.pool.Get().(*Context) + c.writermem.reset(w) + c.Request = req + c.reset() + + engine.handleHTTPRequest(c) + + engine.pool.Put(c) +} + +// HandleContext re-enter a context that has been rewritten. +// This can be done by setting c.Request.Path to your new target. +// Disclaimer: You can loop yourself to death with this, use wisely. 
+func (engine *Engine) HandleContext(c *Context) { + c.reset() + engine.handleHTTPRequest(c) + engine.pool.Put(c) +} + +func (engine *Engine) handleHTTPRequest(c *Context) { + httpMethod := c.Request.Method + path := c.Request.URL.Path + unescape := false + if engine.UseRawPath && len(c.Request.URL.RawPath) > 0 { + path = c.Request.URL.RawPath + unescape = engine.UnescapePathValues + } + + // Find root of the tree for the given HTTP method + t := engine.trees + for i, tl := 0, len(t); i < tl; i++ { + if t[i].method == httpMethod { + root := t[i].root + // Find route in tree + handlers, params, tsr := root.getValue(path, c.Params, unescape) + if handlers != nil { + c.handlers = handlers + c.Params = params + c.Next() + c.writermem.WriteHeaderNow() + return + } + if httpMethod != "CONNECT" && path != "/" { + if tsr && engine.RedirectTrailingSlash { + redirectTrailingSlash(c) + return + } + if engine.RedirectFixedPath && redirectFixedPath(c, root, engine.RedirectFixedPath) { + return + } + } + break + } + } + + if engine.HandleMethodNotAllowed { + for _, tree := range engine.trees { + if tree.method != httpMethod { + if handlers, _, _ := tree.root.getValue(path, nil, unescape); handlers != nil { + c.handlers = engine.allNoMethod + serveError(c, 405, default405Body) + return + } + } + } + } + c.handlers = engine.allNoRoute + serveError(c, 404, default404Body) +} + +var mimePlain = []string{MIMEPlain} + +func serveError(c *Context, code int, defaultMessage []byte) { + c.writermem.status = code + c.Next() + if !c.writermem.Written() { + if c.writermem.Status() == code { + c.writermem.Header()["Content-Type"] = mimePlain + c.Writer.Write(defaultMessage) + } else { + c.writermem.WriteHeaderNow() + } + } +} + +func redirectTrailingSlash(c *Context) { + req := c.Request + path := req.URL.Path + code := 301 // Permanent redirect, request with GET method + if req.Method != "GET" { + code = 307 + } + + if length := len(path); length > 1 && path[length-1] == '/' { + req.URL.Path = path[:length-1] + } else { + req.URL.Path = path + "/" + } + debugPrint("redirecting request %d: %s --> %s", code, path, req.URL.String()) + http.Redirect(c.Writer, req, req.URL.String(), code) + c.writermem.WriteHeaderNow() +} + +func redirectFixedPath(c *Context, root *node, trailingSlash bool) bool { + req := c.Request + path := req.URL.Path + + fixedPath, found := root.findCaseInsensitivePath( + cleanPath(path), + trailingSlash, + ) + if found { + code := 301 // Permanent redirect, request with GET method + if req.Method != "GET" { + code = 307 + } + req.URL.Path = string(fixedPath) + debugPrint("redirecting request %d: %s --> %s", code, path, req.URL.String()) + http.Redirect(c.Writer, req, req.URL.String(), code) + c.writermem.WriteHeaderNow() + return true + } + return false +} diff --git a/vendor/github.com/gin-gonic/gin/ginS/README.md b/vendor/github.com/gin-gonic/gin/ginS/README.md new file mode 100644 index 000000000..ef9225e49 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/ginS/README.md @@ -0,0 +1,17 @@ +# Gin Default Server + +This is API experiment for Gin. 
+ +```go +package main + +import ( + "github.com/gin-gonic/gin" + "github.com/gin-gonic/gin/ginS" +) + +func main() { + ginS.GET("/", func(c *gin.Context) { c.String(200, "Hello World") }) + ginS.Run() +} +``` diff --git a/vendor/github.com/gin-gonic/gin/ginS/gins.go b/vendor/github.com/gin-gonic/gin/ginS/gins.go new file mode 100644 index 000000000..ee00b3816 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/ginS/gins.go @@ -0,0 +1,140 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package ginS + +import ( + "html/template" + "net/http" + "sync" + + "github.com/gin-gonic/gin" +) + +var once sync.Once +var internalEngine *gin.Engine + +func engine() *gin.Engine { + once.Do(func() { + internalEngine = gin.Default() + }) + return internalEngine +} + +func LoadHTMLGlob(pattern string) { + engine().LoadHTMLGlob(pattern) +} + +func LoadHTMLFiles(files ...string) { + engine().LoadHTMLFiles(files...) +} + +func SetHTMLTemplate(templ *template.Template) { + engine().SetHTMLTemplate(templ) +} + +// NoRoute adds handlers for NoRoute. It return a 404 code by default. +func NoRoute(handlers ...gin.HandlerFunc) { + engine().NoRoute(handlers...) +} + +// NoMethod sets the handlers called when... TODO +func NoMethod(handlers ...gin.HandlerFunc) { + engine().NoMethod(handlers...) +} + +// Group creates a new router group. You should add all the routes that have common middlwares or the same path prefix. +// For example, all the routes that use a common middlware for authorization could be grouped. +func Group(relativePath string, handlers ...gin.HandlerFunc) *gin.RouterGroup { + return engine().Group(relativePath, handlers...) +} + +func Handle(httpMethod, relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes { + return engine().Handle(httpMethod, relativePath, handlers...) +} + +// POST is a shortcut for router.Handle("POST", path, handle) +func POST(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes { + return engine().POST(relativePath, handlers...) +} + +// GET is a shortcut for router.Handle("GET", path, handle) +func GET(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes { + return engine().GET(relativePath, handlers...) +} + +// DELETE is a shortcut for router.Handle("DELETE", path, handle) +func DELETE(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes { + return engine().DELETE(relativePath, handlers...) +} + +// PATCH is a shortcut for router.Handle("PATCH", path, handle) +func PATCH(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes { + return engine().PATCH(relativePath, handlers...) +} + +// PUT is a shortcut for router.Handle("PUT", path, handle) +func PUT(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes { + return engine().PUT(relativePath, handlers...) +} + +// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle) +func OPTIONS(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes { + return engine().OPTIONS(relativePath, handlers...) +} + +// HEAD is a shortcut for router.Handle("HEAD", path, handle) +func HEAD(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes { + return engine().HEAD(relativePath, handlers...) +} + +func Any(relativePath string, handlers ...gin.HandlerFunc) gin.IRoutes { + return engine().Any(relativePath, handlers...) 
+} + +func StaticFile(relativePath, filepath string) gin.IRoutes { + return engine().StaticFile(relativePath, filepath) +} + +// Static serves files from the given file system root. +// Internally a http.FileServer is used, therefore http.NotFound is used instead +// of the Router's NotFound handler. +// To use the operating system's file system implementation, +// use : +// router.Static("/static", "/var/www") +func Static(relativePath, root string) gin.IRoutes { + return engine().Static(relativePath, root) +} + +func StaticFS(relativePath string, fs http.FileSystem) gin.IRoutes { + return engine().StaticFS(relativePath, fs) +} + +// Use attachs a global middleware to the router. ie. the middlewares attached though Use() will be +// included in the handlers chain for every single request. Even 404, 405, static files... +// For example, this is the right place for a logger or error management middleware. +func Use(middlewares ...gin.HandlerFunc) gin.IRoutes { + return engine().Use(middlewares...) +} + +// Run : The router is attached to a http.Server and starts listening and serving HTTP requests. +// It is a shortcut for http.ListenAndServe(addr, router) +// Note: this method will block the calling goroutine undefinitelly unless an error happens. +func Run(addr ...string) (err error) { + return engine().Run(addr...) +} + +// RunTLS : The router is attached to a http.Server and starts listening and serving HTTPS requests. +// It is a shortcut for http.ListenAndServeTLS(addr, certFile, keyFile, router) +// Note: this method will block the calling goroutine undefinitelly unless an error happens. +func RunTLS(addr string, certFile string, keyFile string) (err error) { + return engine().RunTLS(addr, certFile, keyFile) +} + +// RunUnix : The router is attached to a http.Server and starts listening and serving HTTP requests +// through the specified unix socket (ie. a file) +// Note: this method will block the calling goroutine undefinitelly unless an error happens. +func RunUnix(file string) (err error) { + return engine().RunUnix(file) +} diff --git a/vendor/github.com/gin-gonic/gin/gin_integration_test.go b/vendor/github.com/gin-gonic/gin/gin_integration_test.go new file mode 100644 index 000000000..52f788423 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/gin_integration_test.go @@ -0,0 +1,135 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package gin + +import ( + "bufio" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func testRequest(t *testing.T, url string) { + resp, err := http.Get(url) + assert.NoError(t, err) + defer resp.Body.Close() + + body, ioerr := ioutil.ReadAll(resp.Body) + assert.NoError(t, ioerr) + assert.Equal(t, "it worked", string(body), "resp body should match") + assert.Equal(t, "200 OK", resp.Status, "should get a 200") +} + +func TestRunEmpty(t *testing.T) { + os.Setenv("PORT", "") + router := New() + go func() { + router.GET("/example", func(c *Context) { c.String(http.StatusOK, "it worked") }) + assert.NoError(t, router.Run()) + }() + // have to wait for the goroutine to start and run the server + // otherwise the main thread will complete + time.Sleep(5 * time.Millisecond) + + assert.Error(t, router.Run(":8080")) + testRequest(t, "http://localhost:8080/example") +} + +func TestRunEmptyWithEnv(t *testing.T) { + os.Setenv("PORT", "3123") + router := New() + go func() { + router.GET("/example", func(c *Context) { c.String(http.StatusOK, "it worked") }) + assert.NoError(t, router.Run()) + }() + // have to wait for the goroutine to start and run the server + // otherwise the main thread will complete + time.Sleep(5 * time.Millisecond) + + assert.Error(t, router.Run(":3123")) + testRequest(t, "http://localhost:3123/example") +} + +func TestRunTooMuchParams(t *testing.T) { + router := New() + assert.Panics(t, func() { + router.Run("2", "2") + }) +} + +func TestRunWithPort(t *testing.T) { + router := New() + go func() { + router.GET("/example", func(c *Context) { c.String(http.StatusOK, "it worked") }) + assert.NoError(t, router.Run(":5150")) + }() + // have to wait for the goroutine to start and run the server + // otherwise the main thread will complete + time.Sleep(5 * time.Millisecond) + + assert.Error(t, router.Run(":5150")) + testRequest(t, "http://localhost:5150/example") +} + +func TestUnixSocket(t *testing.T) { + router := New() + + go func() { + router.GET("/example", func(c *Context) { c.String(http.StatusOK, "it worked") }) + assert.NoError(t, router.RunUnix("/tmp/unix_unit_test")) + }() + // have to wait for the goroutine to start and run the server + // otherwise the main thread will complete + time.Sleep(5 * time.Millisecond) + + c, err := net.Dial("unix", "/tmp/unix_unit_test") + assert.NoError(t, err) + + fmt.Fprint(c, "GET /example HTTP/1.0\r\n\r\n") + scanner := bufio.NewScanner(c) + var response string + for scanner.Scan() { + response += scanner.Text() + } + assert.Contains(t, response, "HTTP/1.0 200", "should get a 200") + assert.Contains(t, response, "it worked", "resp body should match") +} + +func TestBadUnixSocket(t *testing.T) { + router := New() + assert.Error(t, router.RunUnix("#/tmp/unix_unit_test")) +} + +func TestWithHttptestWithAutoSelectedPort(t *testing.T) { + router := New() + router.GET("/example", func(c *Context) { c.String(http.StatusOK, "it worked") }) + + ts := httptest.NewServer(router) + defer ts.Close() + + testRequest(t, ts.URL+"/example") +} + +// func TestWithHttptestWithSpecifiedPort(t *testing.T) { +// router := New() +// router.GET("/example", func(c *Context) { c.String(http.StatusOK, "it worked") }) + +// l, _ := net.Listen("tcp", ":8033") +// ts := httptest.Server{ +// Listener: l, +// Config: &http.Server{Handler: router}, +// } +// ts.Start() +// defer ts.Close() + +// testRequest(t, "http://localhost:8033/example") +// } diff --git 
a/vendor/github.com/gin-gonic/gin/gin_test.go b/vendor/github.com/gin-gonic/gin/gin_test.go new file mode 100644 index 000000000..3ac60577e --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/gin_test.go @@ -0,0 +1,457 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "crypto/tls" + "fmt" + "html/template" + "io/ioutil" + "net/http" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func formatAsDate(t time.Time) string { + year, month, day := t.Date() + return fmt.Sprintf("%d/%02d/%02d", year, month, day) +} + +func setupHTMLFiles(t *testing.T, mode string, tls bool) func() { + go func() { + SetMode(mode) + router := New() + router.Delims("{[{", "}]}") + router.SetFuncMap(template.FuncMap{ + "formatAsDate": formatAsDate, + }) + router.LoadHTMLFiles("./fixtures/basic/hello.tmpl", "./fixtures/basic/raw.tmpl") + router.GET("/test", func(c *Context) { + c.HTML(http.StatusOK, "hello.tmpl", map[string]string{"name": "world"}) + }) + router.GET("/raw", func(c *Context) { + c.HTML(http.StatusOK, "raw.tmpl", map[string]interface{}{ + "now": time.Date(2017, 07, 01, 0, 0, 0, 0, time.UTC), + }) + }) + if tls { + // these files generated by `go run $GOROOT/src/crypto/tls/generate_cert.go --host 127.0.0.1` + router.RunTLS(":9999", "./fixtures/testdata/cert.pem", "./fixtures/testdata/key.pem") + } else { + router.Run(":8888") + } + }() + t.Log("waiting 1 second for server startup") + time.Sleep(1 * time.Second) + return func() {} +} + +func setupHTMLGlob(t *testing.T, mode string, tls bool) func() { + go func() { + SetMode(mode) + router := New() + router.Delims("{[{", "}]}") + router.SetFuncMap(template.FuncMap{ + "formatAsDate": formatAsDate, + }) + router.LoadHTMLGlob("./fixtures/basic/*") + router.GET("/test", func(c *Context) { + c.HTML(http.StatusOK, "hello.tmpl", map[string]string{"name": "world"}) + }) + router.GET("/raw", func(c *Context) { + c.HTML(http.StatusOK, "raw.tmpl", map[string]interface{}{ + "now": time.Date(2017, 07, 01, 0, 0, 0, 0, time.UTC), + }) + }) + if tls { + // these files generated by `go run $GOROOT/src/crypto/tls/generate_cert.go --host 127.0.0.1` + router.RunTLS(":9999", "./fixtures/testdata/cert.pem", "./fixtures/testdata/key.pem") + } else { + router.Run(":8888") + } + }() + t.Log("waiting 1 second for server startup") + time.Sleep(1 * time.Second) + return func() {} +} + +func TestLoadHTMLGlob(t *testing.T) { + td := setupHTMLGlob(t, DebugMode, false) + res, err := http.Get("http://127.0.0.1:8888/test") + if err != nil { + fmt.Println(err) + } + + resp, _ := ioutil.ReadAll(res.Body) + assert.Equal(t, "

<h1>Hello world</h1>
", string(resp[:])) + + td() +} + +func TestLoadHTMLGlob2(t *testing.T) { + td := setupHTMLGlob(t, TestMode, false) + res, err := http.Get("http://127.0.0.1:8888/test") + if err != nil { + fmt.Println(err) + } + + resp, _ := ioutil.ReadAll(res.Body) + assert.Equal(t, "

<h1>Hello world</h1>
", string(resp[:])) + + td() +} + +func TestLoadHTMLGlob3(t *testing.T) { + td := setupHTMLGlob(t, ReleaseMode, false) + res, err := http.Get("http://127.0.0.1:8888/test") + if err != nil { + fmt.Println(err) + } + + resp, _ := ioutil.ReadAll(res.Body) + assert.Equal(t, "

<h1>Hello world</h1>
", string(resp[:])) + + td() +} + +func TestLoadHTMLGlobUsingTLS(t *testing.T) { + td := setupHTMLGlob(t, DebugMode, true) + // Use InsecureSkipVerify for avoiding `x509: certificate signed by unknown authority` error + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + client := &http.Client{Transport: tr} + res, err := client.Get("https://127.0.0.1:9999/test") + if err != nil { + fmt.Println(err) + } + + resp, _ := ioutil.ReadAll(res.Body) + assert.Equal(t, "

<h1>Hello world</h1>
", string(resp[:])) + + td() +} + +func TestLoadHTMLGlobFromFuncMap(t *testing.T) { + time.Now() + td := setupHTMLGlob(t, DebugMode, false) + res, err := http.Get("http://127.0.0.1:8888/raw") + if err != nil { + fmt.Println(err) + } + + resp, _ := ioutil.ReadAll(res.Body) + assert.Equal(t, "Date: 2017/07/01\n", string(resp[:])) + + td() +} + +func init() { + SetMode(TestMode) +} + +func TestCreateEngine(t *testing.T) { + router := New() + assert.Equal(t, "/", router.basePath) + assert.Equal(t, router.engine, router) + assert.Empty(t, router.Handlers) +} + +// func TestLoadHTMLDebugMode(t *testing.T) { +// router := New() +// SetMode(DebugMode) +// router.LoadHTMLGlob("*.testtmpl") +// r := router.HTMLRender.(render.HTMLDebug) +// assert.Empty(t, r.Files) +// assert.Equal(t, "*.testtmpl", r.Glob) +// +// router.LoadHTMLFiles("index.html.testtmpl", "login.html.testtmpl") +// r = router.HTMLRender.(render.HTMLDebug) +// assert.Empty(t, r.Glob) +// assert.Equal(t, []string{"index.html", "login.html"}, r.Files) +// SetMode(TestMode) +// } + +func TestLoadHTMLFiles(t *testing.T) { + td := setupHTMLFiles(t, TestMode, false) + res, err := http.Get("http://127.0.0.1:8888/test") + if err != nil { + fmt.Println(err) + } + + resp, _ := ioutil.ReadAll(res.Body) + assert.Equal(t, "

<h1>Hello world</h1>
", string(resp[:])) + td() +} + +func TestLoadHTMLFiles2(t *testing.T) { + td := setupHTMLFiles(t, DebugMode, false) + res, err := http.Get("http://127.0.0.1:8888/test") + if err != nil { + fmt.Println(err) + } + + resp, _ := ioutil.ReadAll(res.Body) + assert.Equal(t, "

<h1>Hello world</h1>
", string(resp[:])) + td() +} + +func TestLoadHTMLFiles3(t *testing.T) { + td := setupHTMLFiles(t, ReleaseMode, false) + res, err := http.Get("http://127.0.0.1:8888/test") + if err != nil { + fmt.Println(err) + } + + resp, _ := ioutil.ReadAll(res.Body) + assert.Equal(t, "

<h1>Hello world</h1>
", string(resp[:])) + td() +} + +func TestLoadHTMLFilesUsingTLS(t *testing.T) { + td := setupHTMLFiles(t, TestMode, true) + // Use InsecureSkipVerify for avoiding `x509: certificate signed by unknown authority` error + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + client := &http.Client{Transport: tr} + res, err := client.Get("https://127.0.0.1:9999/test") + if err != nil { + fmt.Println(err) + } + + resp, _ := ioutil.ReadAll(res.Body) + assert.Equal(t, "

<h1>Hello world</h1>
", string(resp[:])) + td() +} + +func TestLoadHTMLFilesFuncMap(t *testing.T) { + time.Now() + td := setupHTMLFiles(t, TestMode, false) + res, err := http.Get("http://127.0.0.1:8888/raw") + if err != nil { + fmt.Println(err) + } + + resp, _ := ioutil.ReadAll(res.Body) + assert.Equal(t, "Date: 2017/07/01\n", string(resp[:])) + + td() +} + +func TestAddRoute(t *testing.T) { + router := New() + router.addRoute("GET", "/", HandlersChain{func(_ *Context) {}}) + + assert.Len(t, router.trees, 1) + assert.NotNil(t, router.trees.get("GET")) + assert.Nil(t, router.trees.get("POST")) + + router.addRoute("POST", "/", HandlersChain{func(_ *Context) {}}) + + assert.Len(t, router.trees, 2) + assert.NotNil(t, router.trees.get("GET")) + assert.NotNil(t, router.trees.get("POST")) + + router.addRoute("POST", "/post", HandlersChain{func(_ *Context) {}}) + assert.Len(t, router.trees, 2) +} + +func TestAddRouteFails(t *testing.T) { + router := New() + assert.Panics(t, func() { router.addRoute("", "/", HandlersChain{func(_ *Context) {}}) }) + assert.Panics(t, func() { router.addRoute("GET", "a", HandlersChain{func(_ *Context) {}}) }) + assert.Panics(t, func() { router.addRoute("GET", "/", HandlersChain{}) }) + + router.addRoute("POST", "/post", HandlersChain{func(_ *Context) {}}) + assert.Panics(t, func() { + router.addRoute("POST", "/post", HandlersChain{func(_ *Context) {}}) + }) +} + +func TestCreateDefaultRouter(t *testing.T) { + router := Default() + assert.Len(t, router.Handlers, 2) +} + +func TestNoRouteWithoutGlobalHandlers(t *testing.T) { + var middleware0 HandlerFunc = func(c *Context) {} + var middleware1 HandlerFunc = func(c *Context) {} + + router := New() + + router.NoRoute(middleware0) + assert.Nil(t, router.Handlers) + assert.Len(t, router.noRoute, 1) + assert.Len(t, router.allNoRoute, 1) + compareFunc(t, router.noRoute[0], middleware0) + compareFunc(t, router.allNoRoute[0], middleware0) + + router.NoRoute(middleware1, middleware0) + assert.Len(t, router.noRoute, 2) + assert.Len(t, router.allNoRoute, 2) + compareFunc(t, router.noRoute[0], middleware1) + compareFunc(t, router.allNoRoute[0], middleware1) + compareFunc(t, router.noRoute[1], middleware0) + compareFunc(t, router.allNoRoute[1], middleware0) +} + +func TestNoRouteWithGlobalHandlers(t *testing.T) { + var middleware0 HandlerFunc = func(c *Context) {} + var middleware1 HandlerFunc = func(c *Context) {} + var middleware2 HandlerFunc = func(c *Context) {} + + router := New() + router.Use(middleware2) + + router.NoRoute(middleware0) + assert.Len(t, router.allNoRoute, 2) + assert.Len(t, router.Handlers, 1) + assert.Len(t, router.noRoute, 1) + + compareFunc(t, router.Handlers[0], middleware2) + compareFunc(t, router.noRoute[0], middleware0) + compareFunc(t, router.allNoRoute[0], middleware2) + compareFunc(t, router.allNoRoute[1], middleware0) + + router.Use(middleware1) + assert.Len(t, router.allNoRoute, 3) + assert.Len(t, router.Handlers, 2) + assert.Len(t, router.noRoute, 1) + + compareFunc(t, router.Handlers[0], middleware2) + compareFunc(t, router.Handlers[1], middleware1) + compareFunc(t, router.noRoute[0], middleware0) + compareFunc(t, router.allNoRoute[0], middleware2) + compareFunc(t, router.allNoRoute[1], middleware1) + compareFunc(t, router.allNoRoute[2], middleware0) +} + +func TestNoMethodWithoutGlobalHandlers(t *testing.T) { + var middleware0 HandlerFunc = func(c *Context) {} + var middleware1 HandlerFunc = func(c *Context) {} + + router := New() + + router.NoMethod(middleware0) + assert.Empty(t, router.Handlers) + assert.Len(t, 
router.noMethod, 1) + assert.Len(t, router.allNoMethod, 1) + compareFunc(t, router.noMethod[0], middleware0) + compareFunc(t, router.allNoMethod[0], middleware0) + + router.NoMethod(middleware1, middleware0) + assert.Len(t, router.noMethod, 2) + assert.Len(t, router.allNoMethod, 2) + compareFunc(t, router.noMethod[0], middleware1) + compareFunc(t, router.allNoMethod[0], middleware1) + compareFunc(t, router.noMethod[1], middleware0) + compareFunc(t, router.allNoMethod[1], middleware0) +} + +func TestRebuild404Handlers(t *testing.T) { + +} + +func TestNoMethodWithGlobalHandlers(t *testing.T) { + var middleware0 HandlerFunc = func(c *Context) {} + var middleware1 HandlerFunc = func(c *Context) {} + var middleware2 HandlerFunc = func(c *Context) {} + + router := New() + router.Use(middleware2) + + router.NoMethod(middleware0) + assert.Len(t, router.allNoMethod, 2) + assert.Len(t, router.Handlers, 1) + assert.Len(t, router.noMethod, 1) + + compareFunc(t, router.Handlers[0], middleware2) + compareFunc(t, router.noMethod[0], middleware0) + compareFunc(t, router.allNoMethod[0], middleware2) + compareFunc(t, router.allNoMethod[1], middleware0) + + router.Use(middleware1) + assert.Len(t, router.allNoMethod, 3) + assert.Len(t, router.Handlers, 2) + assert.Len(t, router.noMethod, 1) + + compareFunc(t, router.Handlers[0], middleware2) + compareFunc(t, router.Handlers[1], middleware1) + compareFunc(t, router.noMethod[0], middleware0) + compareFunc(t, router.allNoMethod[0], middleware2) + compareFunc(t, router.allNoMethod[1], middleware1) + compareFunc(t, router.allNoMethod[2], middleware0) +} + +func compareFunc(t *testing.T, a, b interface{}) { + sf1 := reflect.ValueOf(a) + sf2 := reflect.ValueOf(b) + if sf1.Pointer() != sf2.Pointer() { + t.Error("different functions") + } +} + +func TestListOfRoutes(t *testing.T) { + router := New() + router.GET("/favicon.ico", handlerTest1) + router.GET("/", handlerTest1) + group := router.Group("/users") + { + group.GET("/", handlerTest2) + group.GET("/:id", handlerTest1) + group.POST("/:id", handlerTest2) + } + router.Static("/static", ".") + + list := router.Routes() + + assert.Len(t, list, 7) + assertRoutePresent(t, list, RouteInfo{ + Method: "GET", + Path: "/favicon.ico", + Handler: "^(.*/vendor/)?github.com/gin-gonic/gin.handlerTest1$", + }) + assertRoutePresent(t, list, RouteInfo{ + Method: "GET", + Path: "/", + Handler: "^(.*/vendor/)?github.com/gin-gonic/gin.handlerTest1$", + }) + assertRoutePresent(t, list, RouteInfo{ + Method: "GET", + Path: "/users/", + Handler: "^(.*/vendor/)?github.com/gin-gonic/gin.handlerTest2$", + }) + assertRoutePresent(t, list, RouteInfo{ + Method: "GET", + Path: "/users/:id", + Handler: "^(.*/vendor/)?github.com/gin-gonic/gin.handlerTest1$", + }) + assertRoutePresent(t, list, RouteInfo{ + Method: "POST", + Path: "/users/:id", + Handler: "^(.*/vendor/)?github.com/gin-gonic/gin.handlerTest2$", + }) +} + +func assertRoutePresent(t *testing.T, gotRoutes RoutesInfo, wantRoute RouteInfo) { + for _, gotRoute := range gotRoutes { + if gotRoute.Path == wantRoute.Path && gotRoute.Method == wantRoute.Method { + assert.Regexp(t, wantRoute.Handler, gotRoute.Handler) + return + } + } + t.Errorf("route not found: %v", wantRoute) +} + +func handlerTest1(c *Context) {} +func handlerTest2(c *Context) {} diff --git a/vendor/github.com/gin-gonic/gin/githubapi_test.go b/vendor/github.com/gin-gonic/gin/githubapi_test.go new file mode 100644 index 000000000..a08c264d7 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/githubapi_test.go @@ -0,0 +1,390 
@@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "bytes" + "fmt" + "math/rand" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +type route struct { + method string + path string +} + +// http://developer.github.com/v3/ +var githubAPI = []route{ + // OAuth Authorizations + {"GET", "/authorizations"}, + {"GET", "/authorizations/:id"}, + {"POST", "/authorizations"}, + //{"PUT", "/authorizations/clients/:client_id"}, + //{"PATCH", "/authorizations/:id"}, + {"DELETE", "/authorizations/:id"}, + {"GET", "/applications/:client_id/tokens/:access_token"}, + {"DELETE", "/applications/:client_id/tokens"}, + {"DELETE", "/applications/:client_id/tokens/:access_token"}, + + // Activity + {"GET", "/events"}, + {"GET", "/repos/:owner/:repo/events"}, + {"GET", "/networks/:owner/:repo/events"}, + {"GET", "/orgs/:org/events"}, + {"GET", "/users/:user/received_events"}, + {"GET", "/users/:user/received_events/public"}, + {"GET", "/users/:user/events"}, + {"GET", "/users/:user/events/public"}, + {"GET", "/users/:user/events/orgs/:org"}, + {"GET", "/feeds"}, + {"GET", "/notifications"}, + {"GET", "/repos/:owner/:repo/notifications"}, + {"PUT", "/notifications"}, + {"PUT", "/repos/:owner/:repo/notifications"}, + {"GET", "/notifications/threads/:id"}, + //{"PATCH", "/notifications/threads/:id"}, + {"GET", "/notifications/threads/:id/subscription"}, + {"PUT", "/notifications/threads/:id/subscription"}, + {"DELETE", "/notifications/threads/:id/subscription"}, + {"GET", "/repos/:owner/:repo/stargazers"}, + {"GET", "/users/:user/starred"}, + {"GET", "/user/starred"}, + {"GET", "/user/starred/:owner/:repo"}, + {"PUT", "/user/starred/:owner/:repo"}, + {"DELETE", "/user/starred/:owner/:repo"}, + {"GET", "/repos/:owner/:repo/subscribers"}, + {"GET", "/users/:user/subscriptions"}, + {"GET", "/user/subscriptions"}, + {"GET", "/repos/:owner/:repo/subscription"}, + {"PUT", "/repos/:owner/:repo/subscription"}, + {"DELETE", "/repos/:owner/:repo/subscription"}, + {"GET", "/user/subscriptions/:owner/:repo"}, + {"PUT", "/user/subscriptions/:owner/:repo"}, + {"DELETE", "/user/subscriptions/:owner/:repo"}, + + // Gists + {"GET", "/users/:user/gists"}, + {"GET", "/gists"}, + //{"GET", "/gists/public"}, + //{"GET", "/gists/starred"}, + {"GET", "/gists/:id"}, + {"POST", "/gists"}, + //{"PATCH", "/gists/:id"}, + {"PUT", "/gists/:id/star"}, + {"DELETE", "/gists/:id/star"}, + {"GET", "/gists/:id/star"}, + {"POST", "/gists/:id/forks"}, + {"DELETE", "/gists/:id"}, + + // Git Data + {"GET", "/repos/:owner/:repo/git/blobs/:sha"}, + {"POST", "/repos/:owner/:repo/git/blobs"}, + {"GET", "/repos/:owner/:repo/git/commits/:sha"}, + {"POST", "/repos/:owner/:repo/git/commits"}, + //{"GET", "/repos/:owner/:repo/git/refs/*ref"}, + {"GET", "/repos/:owner/:repo/git/refs"}, + {"POST", "/repos/:owner/:repo/git/refs"}, + //{"PATCH", "/repos/:owner/:repo/git/refs/*ref"}, + //{"DELETE", "/repos/:owner/:repo/git/refs/*ref"}, + {"GET", "/repos/:owner/:repo/git/tags/:sha"}, + {"POST", "/repos/:owner/:repo/git/tags"}, + {"GET", "/repos/:owner/:repo/git/trees/:sha"}, + {"POST", "/repos/:owner/:repo/git/trees"}, + + // Issues + {"GET", "/issues"}, + {"GET", "/user/issues"}, + {"GET", "/orgs/:org/issues"}, + {"GET", "/repos/:owner/:repo/issues"}, + {"GET", "/repos/:owner/:repo/issues/:number"}, + {"POST", "/repos/:owner/:repo/issues"}, + //{"PATCH", 
"/repos/:owner/:repo/issues/:number"}, + {"GET", "/repos/:owner/:repo/assignees"}, + {"GET", "/repos/:owner/:repo/assignees/:assignee"}, + {"GET", "/repos/:owner/:repo/issues/:number/comments"}, + //{"GET", "/repos/:owner/:repo/issues/comments"}, + //{"GET", "/repos/:owner/:repo/issues/comments/:id"}, + {"POST", "/repos/:owner/:repo/issues/:number/comments"}, + //{"PATCH", "/repos/:owner/:repo/issues/comments/:id"}, + //{"DELETE", "/repos/:owner/:repo/issues/comments/:id"}, + {"GET", "/repos/:owner/:repo/issues/:number/events"}, + //{"GET", "/repos/:owner/:repo/issues/events"}, + //{"GET", "/repos/:owner/:repo/issues/events/:id"}, + {"GET", "/repos/:owner/:repo/labels"}, + {"GET", "/repos/:owner/:repo/labels/:name"}, + {"POST", "/repos/:owner/:repo/labels"}, + //{"PATCH", "/repos/:owner/:repo/labels/:name"}, + {"DELETE", "/repos/:owner/:repo/labels/:name"}, + {"GET", "/repos/:owner/:repo/issues/:number/labels"}, + {"POST", "/repos/:owner/:repo/issues/:number/labels"}, + {"DELETE", "/repos/:owner/:repo/issues/:number/labels/:name"}, + {"PUT", "/repos/:owner/:repo/issues/:number/labels"}, + {"DELETE", "/repos/:owner/:repo/issues/:number/labels"}, + {"GET", "/repos/:owner/:repo/milestones/:number/labels"}, + {"GET", "/repos/:owner/:repo/milestones"}, + {"GET", "/repos/:owner/:repo/milestones/:number"}, + {"POST", "/repos/:owner/:repo/milestones"}, + //{"PATCH", "/repos/:owner/:repo/milestones/:number"}, + {"DELETE", "/repos/:owner/:repo/milestones/:number"}, + + // Miscellaneous + {"GET", "/emojis"}, + {"GET", "/gitignore/templates"}, + {"GET", "/gitignore/templates/:name"}, + {"POST", "/markdown"}, + {"POST", "/markdown/raw"}, + {"GET", "/meta"}, + {"GET", "/rate_limit"}, + + // Organizations + {"GET", "/users/:user/orgs"}, + {"GET", "/user/orgs"}, + {"GET", "/orgs/:org"}, + //{"PATCH", "/orgs/:org"}, + {"GET", "/orgs/:org/members"}, + {"GET", "/orgs/:org/members/:user"}, + {"DELETE", "/orgs/:org/members/:user"}, + {"GET", "/orgs/:org/public_members"}, + {"GET", "/orgs/:org/public_members/:user"}, + {"PUT", "/orgs/:org/public_members/:user"}, + {"DELETE", "/orgs/:org/public_members/:user"}, + {"GET", "/orgs/:org/teams"}, + {"GET", "/teams/:id"}, + {"POST", "/orgs/:org/teams"}, + //{"PATCH", "/teams/:id"}, + {"DELETE", "/teams/:id"}, + {"GET", "/teams/:id/members"}, + {"GET", "/teams/:id/members/:user"}, + {"PUT", "/teams/:id/members/:user"}, + {"DELETE", "/teams/:id/members/:user"}, + {"GET", "/teams/:id/repos"}, + {"GET", "/teams/:id/repos/:owner/:repo"}, + {"PUT", "/teams/:id/repos/:owner/:repo"}, + {"DELETE", "/teams/:id/repos/:owner/:repo"}, + {"GET", "/user/teams"}, + + // Pull Requests + {"GET", "/repos/:owner/:repo/pulls"}, + {"GET", "/repos/:owner/:repo/pulls/:number"}, + {"POST", "/repos/:owner/:repo/pulls"}, + //{"PATCH", "/repos/:owner/:repo/pulls/:number"}, + {"GET", "/repos/:owner/:repo/pulls/:number/commits"}, + {"GET", "/repos/:owner/:repo/pulls/:number/files"}, + {"GET", "/repos/:owner/:repo/pulls/:number/merge"}, + {"PUT", "/repos/:owner/:repo/pulls/:number/merge"}, + {"GET", "/repos/:owner/:repo/pulls/:number/comments"}, + //{"GET", "/repos/:owner/:repo/pulls/comments"}, + //{"GET", "/repos/:owner/:repo/pulls/comments/:number"}, + {"PUT", "/repos/:owner/:repo/pulls/:number/comments"}, + //{"PATCH", "/repos/:owner/:repo/pulls/comments/:number"}, + //{"DELETE", "/repos/:owner/:repo/pulls/comments/:number"}, + + // Repositories + {"GET", "/user/repos"}, + {"GET", "/users/:user/repos"}, + {"GET", "/orgs/:org/repos"}, + {"GET", "/repositories"}, + {"POST", "/user/repos"}, + 
{"POST", "/orgs/:org/repos"}, + {"GET", "/repos/:owner/:repo"}, + //{"PATCH", "/repos/:owner/:repo"}, + {"GET", "/repos/:owner/:repo/contributors"}, + {"GET", "/repos/:owner/:repo/languages"}, + {"GET", "/repos/:owner/:repo/teams"}, + {"GET", "/repos/:owner/:repo/tags"}, + {"GET", "/repos/:owner/:repo/branches"}, + {"GET", "/repos/:owner/:repo/branches/:branch"}, + {"DELETE", "/repos/:owner/:repo"}, + {"GET", "/repos/:owner/:repo/collaborators"}, + {"GET", "/repos/:owner/:repo/collaborators/:user"}, + {"PUT", "/repos/:owner/:repo/collaborators/:user"}, + {"DELETE", "/repos/:owner/:repo/collaborators/:user"}, + {"GET", "/repos/:owner/:repo/comments"}, + {"GET", "/repos/:owner/:repo/commits/:sha/comments"}, + {"POST", "/repos/:owner/:repo/commits/:sha/comments"}, + {"GET", "/repos/:owner/:repo/comments/:id"}, + //{"PATCH", "/repos/:owner/:repo/comments/:id"}, + {"DELETE", "/repos/:owner/:repo/comments/:id"}, + {"GET", "/repos/:owner/:repo/commits"}, + {"GET", "/repos/:owner/:repo/commits/:sha"}, + {"GET", "/repos/:owner/:repo/readme"}, + //{"GET", "/repos/:owner/:repo/contents/*path"}, + //{"PUT", "/repos/:owner/:repo/contents/*path"}, + //{"DELETE", "/repos/:owner/:repo/contents/*path"}, + //{"GET", "/repos/:owner/:repo/:archive_format/:ref"}, + {"GET", "/repos/:owner/:repo/keys"}, + {"GET", "/repos/:owner/:repo/keys/:id"}, + {"POST", "/repos/:owner/:repo/keys"}, + //{"PATCH", "/repos/:owner/:repo/keys/:id"}, + {"DELETE", "/repos/:owner/:repo/keys/:id"}, + {"GET", "/repos/:owner/:repo/downloads"}, + {"GET", "/repos/:owner/:repo/downloads/:id"}, + {"DELETE", "/repos/:owner/:repo/downloads/:id"}, + {"GET", "/repos/:owner/:repo/forks"}, + {"POST", "/repos/:owner/:repo/forks"}, + {"GET", "/repos/:owner/:repo/hooks"}, + {"GET", "/repos/:owner/:repo/hooks/:id"}, + {"POST", "/repos/:owner/:repo/hooks"}, + //{"PATCH", "/repos/:owner/:repo/hooks/:id"}, + {"POST", "/repos/:owner/:repo/hooks/:id/tests"}, + {"DELETE", "/repos/:owner/:repo/hooks/:id"}, + {"POST", "/repos/:owner/:repo/merges"}, + {"GET", "/repos/:owner/:repo/releases"}, + {"GET", "/repos/:owner/:repo/releases/:id"}, + {"POST", "/repos/:owner/:repo/releases"}, + //{"PATCH", "/repos/:owner/:repo/releases/:id"}, + {"DELETE", "/repos/:owner/:repo/releases/:id"}, + {"GET", "/repos/:owner/:repo/releases/:id/assets"}, + {"GET", "/repos/:owner/:repo/stats/contributors"}, + {"GET", "/repos/:owner/:repo/stats/commit_activity"}, + {"GET", "/repos/:owner/:repo/stats/code_frequency"}, + {"GET", "/repos/:owner/:repo/stats/participation"}, + {"GET", "/repos/:owner/:repo/stats/punch_card"}, + {"GET", "/repos/:owner/:repo/statuses/:ref"}, + {"POST", "/repos/:owner/:repo/statuses/:ref"}, + + // Search + {"GET", "/search/repositories"}, + {"GET", "/search/code"}, + {"GET", "/search/issues"}, + {"GET", "/search/users"}, + {"GET", "/legacy/issues/search/:owner/:repository/:state/:keyword"}, + {"GET", "/legacy/repos/search/:keyword"}, + {"GET", "/legacy/user/search/:keyword"}, + {"GET", "/legacy/user/email/:email"}, + + // Users + {"GET", "/users/:user"}, + {"GET", "/user"}, + //{"PATCH", "/user"}, + {"GET", "/users"}, + {"GET", "/user/emails"}, + {"POST", "/user/emails"}, + {"DELETE", "/user/emails"}, + {"GET", "/users/:user/followers"}, + {"GET", "/user/followers"}, + {"GET", "/users/:user/following"}, + {"GET", "/user/following"}, + {"GET", "/user/following/:user"}, + {"GET", "/users/:user/following/:target_user"}, + {"PUT", "/user/following/:user"}, + {"DELETE", "/user/following/:user"}, + {"GET", "/users/:user/keys"}, + {"GET", "/user/keys"}, + {"GET", 
"/user/keys/:id"}, + {"POST", "/user/keys"}, + //{"PATCH", "/user/keys/:id"}, + {"DELETE", "/user/keys/:id"}, +} + +func githubConfigRouter(router *Engine) { + for _, route := range githubAPI { + router.Handle(route.method, route.path, func(c *Context) { + output := make(map[string]string, len(c.Params)+1) + output["status"] = "good" + for _, param := range c.Params { + output[param.Key] = param.Value + } + c.JSON(200, output) + }) + } +} + +func TestGithubAPI(t *testing.T) { + DefaultWriter = os.Stdout + router := Default() + githubConfigRouter(router) + + for _, route := range githubAPI { + path, values := exampleFromPath(route.path) + w := performRequest(router, route.method, path) + + // TEST + assert.Contains(t, w.Body.String(), "\"status\":\"good\"") + for _, value := range values { + str := fmt.Sprintf("\"%s\":\"%s\"", value.Key, value.Value) + assert.Contains(t, w.Body.String(), str) + } + } +} + +func exampleFromPath(path string) (string, Params) { + output := new(bytes.Buffer) + params := make(Params, 0, 6) + start := -1 + for i, c := range path { + if c == ':' { + start = i + 1 + } + if start >= 0 { + if c == '/' { + value := fmt.Sprint(rand.Intn(100000)) + params = append(params, Param{ + Key: path[start:i], + Value: value, + }) + output.WriteString(value) + output.WriteRune(c) + start = -1 + } + } else { + output.WriteRune(c) + } + } + if start >= 0 { + value := fmt.Sprint(rand.Intn(100000)) + params = append(params, Param{ + Key: path[start:], + Value: value, + }) + output.WriteString(value) + } + + return output.String(), params +} + +func BenchmarkGithub(b *testing.B) { + router := New() + githubConfigRouter(router) + runRequest(b, router, "GET", "/legacy/issues/search/:owner/:repository/:state/:keyword") +} + +func BenchmarkParallelGithub(b *testing.B) { + DefaultWriter = os.Stdout + router := New() + githubConfigRouter(router) + + req, _ := http.NewRequest("POST", "/repos/manucorporat/sse/git/blobs", nil) + + b.RunParallel(func(pb *testing.PB) { + // Each goroutine has its own bytes.Buffer. + for pb.Next() { + w := httptest.NewRecorder() + router.ServeHTTP(w, req) + } + }) +} + +func BenchmarkParallelGithubDefault(b *testing.B) { + DefaultWriter = os.Stdout + router := Default() + githubConfigRouter(router) + + req, _ := http.NewRequest("POST", "/repos/manucorporat/sse/git/blobs", nil) + + b.RunParallel(func(pb *testing.PB) { + // Each goroutine has its own bytes.Buffer. + for pb.Next() { + w := httptest.NewRecorder() + router.ServeHTTP(w, req) + } + }) +} diff --git a/vendor/github.com/gin-gonic/gin/json/json.go b/vendor/github.com/gin-gonic/gin/json/json.go new file mode 100644 index 000000000..aa76aa309 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/json/json.go @@ -0,0 +1,15 @@ +// Copyright 2017 Bo-Yi Wu. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +// +build !jsoniter + +package json + +import "encoding/json" + +var ( + Marshal = json.Marshal + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder +) diff --git a/vendor/github.com/gin-gonic/gin/json/jsoniter.go b/vendor/github.com/gin-gonic/gin/json/jsoniter.go new file mode 100644 index 000000000..ffe1424ac --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/json/jsoniter.go @@ -0,0 +1,16 @@ +// Copyright 2017 Bo-Yi Wu. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +// +build jsoniter + +package json + +import "github.com/json-iterator/go" + +var ( + json = jsoniter.ConfigCompatibleWithStandardLibrary + Marshal = json.Marshal + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder +) diff --git a/vendor/github.com/gin-gonic/gin/logger.go b/vendor/github.com/gin-gonic/gin/logger.go new file mode 100644 index 000000000..c679c7871 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/logger.go @@ -0,0 +1,151 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/mattn/go-isatty" +) + +var ( + green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109}) + white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109}) + yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109}) + red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109}) + blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109}) + magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109}) + cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109}) + reset = string([]byte{27, 91, 48, 109}) + disableColor = false +) + +// DisableConsoleColor disables color output in the console. +func DisableConsoleColor() { + disableColor = true +} + +// ErrorLogger returns a handlerfunc for any error type. +func ErrorLogger() HandlerFunc { + return ErrorLoggerT(ErrorTypeAny) +} + +// ErrorLoggerT returns a handlerfunc for a given error type. +func ErrorLoggerT(typ ErrorType) HandlerFunc { + return func(c *Context) { + c.Next() + errors := c.Errors.ByType(typ) + if len(errors) > 0 { + c.JSON(-1, errors) + } + } +} + +// Logger instances a Logger middleware that will write the logs to gin.DefaultWriter. +// By default gin.DefaultWriter = os.Stdout. +func Logger() HandlerFunc { + return LoggerWithWriter(DefaultWriter) +} + +// LoggerWithWriter instance a Logger middleware with the specified writter buffer. +// Example: os.Stdout, a file opened in write mode, a socket... +func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc { + isTerm := true + + if w, ok := out.(*os.File); !ok || + (os.Getenv("TERM") == "dumb" || (!isatty.IsTerminal(w.Fd()) && !isatty.IsCygwinTerminal(w.Fd()))) || + disableColor { + isTerm = false + } + + var skip map[string]struct{} + + if length := len(notlogged); length > 0 { + skip = make(map[string]struct{}, length) + + for _, path := range notlogged { + skip[path] = struct{}{} + } + } + + return func(c *Context) { + // Start timer + start := time.Now() + path := c.Request.URL.Path + raw := c.Request.URL.RawQuery + + // Process request + c.Next() + + // Log only when path is not being skipped + if _, ok := skip[path]; !ok { + // Stop timer + end := time.Now() + latency := end.Sub(start) + + clientIP := c.ClientIP() + method := c.Request.Method + statusCode := c.Writer.Status() + var statusColor, methodColor, resetColor string + if isTerm { + statusColor = colorForStatus(statusCode) + methodColor = colorForMethod(method) + resetColor = reset + } + comment := c.Errors.ByType(ErrorTypePrivate).String() + + if raw != "" { + path = path + "?" 
+ raw + } + + fmt.Fprintf(out, "[GIN] %v |%s %3d %s| %13v | %15s |%s %-7s %s %s\n%s", + end.Format("2006/01/02 - 15:04:05"), + statusColor, statusCode, resetColor, + latency, + clientIP, + methodColor, method, resetColor, + path, + comment, + ) + } + } +} + +func colorForStatus(code int) string { + switch { + case code >= 200 && code < 300: + return green + case code >= 300 && code < 400: + return white + case code >= 400 && code < 500: + return yellow + default: + return red + } +} + +func colorForMethod(method string) string { + switch method { + case "GET": + return blue + case "POST": + return cyan + case "PUT": + return yellow + case "DELETE": + return red + case "PATCH": + return green + case "HEAD": + return magenta + case "OPTIONS": + return white + default: + return reset + } +} diff --git a/vendor/github.com/gin-gonic/gin/logger_test.go b/vendor/github.com/gin-gonic/gin/logger_test.go new file mode 100644 index 000000000..74a9659c0 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/logger_test.go @@ -0,0 +1,149 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "bytes" + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func init() { + SetMode(TestMode) +} + +func TestLogger(t *testing.T) { + buffer := new(bytes.Buffer) + router := New() + router.Use(LoggerWithWriter(buffer)) + router.GET("/example", func(c *Context) {}) + router.POST("/example", func(c *Context) {}) + router.PUT("/example", func(c *Context) {}) + router.DELETE("/example", func(c *Context) {}) + router.PATCH("/example", func(c *Context) {}) + router.HEAD("/example", func(c *Context) {}) + router.OPTIONS("/example", func(c *Context) {}) + + performRequest(router, "GET", "/example?a=100") + assert.Contains(t, buffer.String(), "200") + assert.Contains(t, buffer.String(), "GET") + assert.Contains(t, buffer.String(), "/example") + assert.Contains(t, buffer.String(), "a=100") + + // I wrote these first (extending the above) but then realized they are more + // like integration tests because they test the whole logging process rather + // than individual functions. Im not sure where these should go. 
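The `LoggerWithWriter` middleware being exercised in these tests takes an output writer plus an optional list of paths to skip. A hedged usage sketch (the log target, skipped path and port are illustrative):

```go
package main

import (
	"os"

	"github.com/gin-gonic/gin"
)

func main() {
	gin.DisableConsoleColor() // plain log lines; see the disableColor flag in logger.go

	r := gin.New()
	// Log to stdout, but never log the health-check route.
	r.Use(gin.LoggerWithWriter(os.Stdout, "/healthz"))
	r.Use(gin.Recovery())

	r.GET("/healthz", func(c *gin.Context) { c.String(200, "ok") })
	r.GET("/ping", func(c *gin.Context) { c.String(200, "pong") })

	r.Run(":8080") // illustrative address
}
```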
+ buffer.Reset() + performRequest(router, "POST", "/example") + assert.Contains(t, buffer.String(), "200") + assert.Contains(t, buffer.String(), "POST") + assert.Contains(t, buffer.String(), "/example") + + buffer.Reset() + performRequest(router, "PUT", "/example") + assert.Contains(t, buffer.String(), "200") + assert.Contains(t, buffer.String(), "PUT") + assert.Contains(t, buffer.String(), "/example") + + buffer.Reset() + performRequest(router, "DELETE", "/example") + assert.Contains(t, buffer.String(), "200") + assert.Contains(t, buffer.String(), "DELETE") + assert.Contains(t, buffer.String(), "/example") + + buffer.Reset() + performRequest(router, "PATCH", "/example") + assert.Contains(t, buffer.String(), "200") + assert.Contains(t, buffer.String(), "PATCH") + assert.Contains(t, buffer.String(), "/example") + + buffer.Reset() + performRequest(router, "HEAD", "/example") + assert.Contains(t, buffer.String(), "200") + assert.Contains(t, buffer.String(), "HEAD") + assert.Contains(t, buffer.String(), "/example") + + buffer.Reset() + performRequest(router, "OPTIONS", "/example") + assert.Contains(t, buffer.String(), "200") + assert.Contains(t, buffer.String(), "OPTIONS") + assert.Contains(t, buffer.String(), "/example") + + buffer.Reset() + performRequest(router, "GET", "/notfound") + assert.Contains(t, buffer.String(), "404") + assert.Contains(t, buffer.String(), "GET") + assert.Contains(t, buffer.String(), "/notfound") + +} + +func TestColorForMethod(t *testing.T) { + assert.Equal(t, string([]byte{27, 91, 57, 55, 59, 52, 52, 109}), colorForMethod("GET"), "get should be blue") + assert.Equal(t, string([]byte{27, 91, 57, 55, 59, 52, 54, 109}), colorForMethod("POST"), "post should be cyan") + assert.Equal(t, string([]byte{27, 91, 57, 55, 59, 52, 51, 109}), colorForMethod("PUT"), "put should be yellow") + assert.Equal(t, string([]byte{27, 91, 57, 55, 59, 52, 49, 109}), colorForMethod("DELETE"), "delete should be red") + assert.Equal(t, string([]byte{27, 91, 57, 55, 59, 52, 50, 109}), colorForMethod("PATCH"), "patch should be green") + assert.Equal(t, string([]byte{27, 91, 57, 55, 59, 52, 53, 109}), colorForMethod("HEAD"), "head should be magenta") + assert.Equal(t, string([]byte{27, 91, 57, 48, 59, 52, 55, 109}), colorForMethod("OPTIONS"), "options should be white") + assert.Equal(t, string([]byte{27, 91, 48, 109}), colorForMethod("TRACE"), "trace is not defined and should be the reset color") +} + +func TestColorForStatus(t *testing.T) { + assert.Equal(t, string([]byte{27, 91, 57, 55, 59, 52, 50, 109}), colorForStatus(200), "2xx should be green") + assert.Equal(t, string([]byte{27, 91, 57, 48, 59, 52, 55, 109}), colorForStatus(301), "3xx should be white") + assert.Equal(t, string([]byte{27, 91, 57, 55, 59, 52, 51, 109}), colorForStatus(404), "4xx should be yellow") + assert.Equal(t, string([]byte{27, 91, 57, 55, 59, 52, 49, 109}), colorForStatus(2), "other things should be red") +} + +func TestErrorLogger(t *testing.T) { + router := New() + router.Use(ErrorLogger()) + router.GET("/error", func(c *Context) { + c.Error(errors.New("this is an error")) + }) + router.GET("/abort", func(c *Context) { + c.AbortWithError(401, errors.New("no authorized")) + }) + router.GET("/print", func(c *Context) { + c.Error(errors.New("this is an error")) + c.String(500, "hola!") + }) + + w := performRequest(router, "GET", "/error") + assert.Equal(t, 200, w.Code) + assert.Equal(t, "{\"error\":\"this is an error\"}", w.Body.String()) + + w = performRequest(router, "GET", "/abort") + assert.Equal(t, 401, w.Code) + 
assert.Equal(t, "{\"error\":\"no authorized\"}", w.Body.String()) + + w = performRequest(router, "GET", "/print") + assert.Equal(t, 500, w.Code) + assert.Equal(t, "hola!{\"error\":\"this is an error\"}", w.Body.String()) +} + +func TestSkippingPaths(t *testing.T) { + buffer := new(bytes.Buffer) + router := New() + router.Use(LoggerWithWriter(buffer, "/skipped")) + router.GET("/logged", func(c *Context) {}) + router.GET("/skipped", func(c *Context) {}) + + performRequest(router, "GET", "/logged") + assert.Contains(t, buffer.String(), "200") + + buffer.Reset() + performRequest(router, "GET", "/skipped") + assert.Contains(t, buffer.String(), "") +} + +func TestDisableConsoleColor(t *testing.T) { + New() + assert.False(t, disableColor) + DisableConsoleColor() + assert.True(t, disableColor) +} diff --git a/vendor/github.com/gin-gonic/gin/middleware_test.go b/vendor/github.com/gin-gonic/gin/middleware_test.go new file mode 100644 index 000000000..aa6a37a89 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/middleware_test.go @@ -0,0 +1,249 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "errors" + "strings" + "testing" + + "github.com/gin-contrib/sse" + "github.com/stretchr/testify/assert" +) + +func TestMiddlewareGeneralCase(t *testing.T) { + signature := "" + router := New() + router.Use(func(c *Context) { + signature += "A" + c.Next() + signature += "B" + }) + router.Use(func(c *Context) { + signature += "C" + }) + router.GET("/", func(c *Context) { + signature += "D" + }) + router.NoRoute(func(c *Context) { + signature += " X " + }) + router.NoMethod(func(c *Context) { + signature += " XX " + }) + // RUN + w := performRequest(router, "GET", "/") + + // TEST + assert.Equal(t, 200, w.Code) + assert.Equal(t, "ACDB", signature) +} + +func TestMiddlewareNoRoute(t *testing.T) { + signature := "" + router := New() + router.Use(func(c *Context) { + signature += "A" + c.Next() + signature += "B" + }) + router.Use(func(c *Context) { + signature += "C" + c.Next() + c.Next() + c.Next() + c.Next() + signature += "D" + }) + router.NoRoute(func(c *Context) { + signature += "E" + c.Next() + signature += "F" + }, func(c *Context) { + signature += "G" + c.Next() + signature += "H" + }) + router.NoMethod(func(c *Context) { + signature += " X " + }) + // RUN + w := performRequest(router, "GET", "/") + + // TEST + assert.Equal(t, 404, w.Code) + assert.Equal(t, "ACEGHFDB", signature) +} + +func TestMiddlewareNoMethodEnabled(t *testing.T) { + signature := "" + router := New() + router.HandleMethodNotAllowed = true + router.Use(func(c *Context) { + signature += "A" + c.Next() + signature += "B" + }) + router.Use(func(c *Context) { + signature += "C" + c.Next() + signature += "D" + }) + router.NoMethod(func(c *Context) { + signature += "E" + c.Next() + signature += "F" + }, func(c *Context) { + signature += "G" + c.Next() + signature += "H" + }) + router.NoRoute(func(c *Context) { + signature += " X " + }) + router.POST("/", func(c *Context) { + signature += " XX " + }) + // RUN + w := performRequest(router, "GET", "/") + + // TEST + assert.Equal(t, 405, w.Code) + assert.Equal(t, "ACEGHFDB", signature) +} + +func TestMiddlewareNoMethodDisabled(t *testing.T) { + signature := "" + router := New() + router.HandleMethodNotAllowed = false + router.Use(func(c *Context) { + signature += "A" + c.Next() + signature += "B" + }) + router.Use(func(c *Context) { + signature += 
"C" + c.Next() + signature += "D" + }) + router.NoMethod(func(c *Context) { + signature += "E" + c.Next() + signature += "F" + }, func(c *Context) { + signature += "G" + c.Next() + signature += "H" + }) + router.NoRoute(func(c *Context) { + signature += " X " + }) + router.POST("/", func(c *Context) { + signature += " XX " + }) + // RUN + w := performRequest(router, "GET", "/") + + // TEST + assert.Equal(t, 404, w.Code) + assert.Equal(t, "AC X DB", signature) +} + +func TestMiddlewareAbort(t *testing.T) { + signature := "" + router := New() + router.Use(func(c *Context) { + signature += "A" + }) + router.Use(func(c *Context) { + signature += "C" + c.AbortWithStatus(401) + c.Next() + signature += "D" + }) + router.GET("/", func(c *Context) { + signature += " X " + c.Next() + signature += " XX " + }) + + // RUN + w := performRequest(router, "GET", "/") + + // TEST + assert.Equal(t, 401, w.Code) + assert.Equal(t, "ACD", signature) +} + +func TestMiddlewareAbortHandlersChainAndNext(t *testing.T) { + signature := "" + router := New() + router.Use(func(c *Context) { + signature += "A" + c.Next() + c.AbortWithStatus(410) + signature += "B" + + }) + router.GET("/", func(c *Context) { + signature += "C" + c.Next() + }) + // RUN + w := performRequest(router, "GET", "/") + + // TEST + assert.Equal(t, 410, w.Code) + assert.Equal(t, "ACB", signature) +} + +// TestFailHandlersChain - ensure that Fail interrupt used middleware in fifo order as +// as well as Abort +func TestMiddlewareFailHandlersChain(t *testing.T) { + // SETUP + signature := "" + router := New() + router.Use(func(context *Context) { + signature += "A" + context.AbortWithError(500, errors.New("foo")) + }) + router.Use(func(context *Context) { + signature += "B" + context.Next() + signature += "C" + }) + // RUN + w := performRequest(router, "GET", "/") + + // TEST + assert.Equal(t, 500, w.Code) + assert.Equal(t, "A", signature) +} + +func TestMiddlewareWrite(t *testing.T) { + router := New() + router.Use(func(c *Context) { + c.String(400, "hola\n") + }) + router.Use(func(c *Context) { + c.XML(400, H{"foo": "bar"}) + }) + router.Use(func(c *Context) { + c.JSON(400, H{"foo": "bar"}) + }) + router.GET("/", func(c *Context) { + c.JSON(400, H{"foo": "bar"}) + }, func(c *Context) { + c.Render(400, sse.Event{ + Event: "test", + Data: "message", + }) + }) + + w := performRequest(router, "GET", "/") + + assert.Equal(t, 400, w.Code) + assert.Equal(t, strings.Replace("hola\nbar{\"foo\":\"bar\"}{\"foo\":\"bar\"}event:test\ndata:message\n\n", " ", "", -1), strings.Replace(w.Body.String(), " ", "", -1)) +} diff --git a/vendor/github.com/gin-gonic/gin/mode.go b/vendor/github.com/gin-gonic/gin/mode.go new file mode 100644 index 000000000..9df4e45fa --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/mode.go @@ -0,0 +1,72 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "io" + "os" + + "github.com/gin-gonic/gin/binding" +) + +const ENV_GIN_MODE = "GIN_MODE" + +const ( + DebugMode = "debug" + ReleaseMode = "release" + TestMode = "test" +) +const ( + debugCode = iota + releaseCode + testCode +) + +// DefaultWriter is the default io.Writer used the Gin for debug output and +// middleware output like Logger() or Recovery(). +// Note that both Logger and Recovery provides custom ways to configure their +// output io.Writer. 
+// To support coloring in Windows use: +// import "github.com/mattn/go-colorable" +// gin.DefaultWriter = colorable.NewColorableStdout() +var DefaultWriter io.Writer = os.Stdout +var DefaultErrorWriter io.Writer = os.Stderr + +var ginMode = debugCode +var modeName = DebugMode + +func init() { + mode := os.Getenv(ENV_GIN_MODE) + SetMode(mode) +} + +func SetMode(value string) { + switch value { + case DebugMode, "": + ginMode = debugCode + case ReleaseMode: + ginMode = releaseCode + case TestMode: + ginMode = testCode + default: + panic("gin mode unknown: " + value) + } + if value == "" { + value = DebugMode + } + modeName = value +} + +func DisableBindValidation() { + binding.Validator = nil +} + +func EnableJsonDecoderUseNumber() { + binding.EnableDecoderUseNumber = true +} + +func Mode() string { + return modeName +} diff --git a/vendor/github.com/gin-gonic/gin/mode_test.go b/vendor/github.com/gin-gonic/gin/mode_test.go new file mode 100644 index 000000000..cf27acd86 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/mode_test.go @@ -0,0 +1,47 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "os" + "testing" + + "github.com/gin-gonic/gin/binding" + "github.com/stretchr/testify/assert" +) + +func init() { + os.Setenv(ENV_GIN_MODE, TestMode) +} + +func TestSetMode(t *testing.T) { + assert.Equal(t, testCode, ginMode) + assert.Equal(t, TestMode, Mode()) + os.Unsetenv(ENV_GIN_MODE) + + SetMode("") + assert.Equal(t, debugCode, ginMode) + assert.Equal(t, DebugMode, Mode()) + + SetMode(DebugMode) + assert.Equal(t, debugCode, ginMode) + assert.Equal(t, DebugMode, Mode()) + + SetMode(ReleaseMode) + assert.Equal(t, releaseCode, ginMode) + assert.Equal(t, ReleaseMode, Mode()) + + SetMode(TestMode) + assert.Equal(t, testCode, ginMode) + assert.Equal(t, TestMode, Mode()) + + assert.Panics(t, func() { SetMode("unknown") }) +} + +func TestEnableJsonDecoderUseNumber(t *testing.T) { + assert.False(t, binding.EnableDecoderUseNumber) + EnableJsonDecoderUseNumber() + assert.True(t, binding.EnableDecoderUseNumber) +} diff --git a/vendor/github.com/gin-gonic/gin/path.go b/vendor/github.com/gin-gonic/gin/path.go new file mode 100644 index 000000000..ed63ad1aa --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/path.go @@ -0,0 +1,123 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Based on the path package, Copyright 2009 The Go Authors. +// Use of this source code is governed by a BSD-style license that can be found +// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE. + +package gin + +// cleanPath is the URL version of path.Clean, it returns a canonical URL path +// for p, eliminating . and .. elements. +// +// The following rules are applied iteratively until no further processing can +// be done: +// 1. Replace multiple slashes with a single slash. +// 2. Eliminate each . path name element (the current directory). +// 3. Eliminate each inner .. path name element (the parent directory) +// along with the non-.. element that precedes it. +// 4. Eliminate .. elements that begin a rooted path: +// that is, replace "/.." by "/" at the beginning of a path. +// +// If the result of this process is an empty string, "/" is returned. 
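As an aside on mode.go above: the mode comes either from the `GIN_MODE` environment variable or from `SetMode`, and anything other than the three named constants panics. A minimal sketch of switching modes programmatically (the release-mode choice and route are illustrative):

```go
package main

import (
	"fmt"

	"github.com/gin-gonic/gin"
)

func main() {
	// Equivalent to exporting GIN_MODE=release before the process starts.
	gin.SetMode(gin.ReleaseMode)
	fmt.Println(gin.Mode()) // prints "release"

	r := gin.Default()
	r.GET("/ping", func(c *gin.Context) { c.String(200, "pong") })
	r.Run(":8080") // illustrative address
}
```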
+func cleanPath(p string) string { + // Turn empty string into "/" + if p == "" { + return "/" + } + + n := len(p) + var buf []byte + + // Invariants: + // reading from path; r is index of next byte to process. + // writing to buf; w is index of next byte to write. + + // path must start with '/' + r := 1 + w := 1 + + if p[0] != '/' { + r = 0 + buf = make([]byte, n+1) + buf[0] = '/' + } + + trailing := n > 2 && p[n-1] == '/' + + // A bit more clunky without a 'lazybuf' like the path package, but the loop + // gets completely inlined (bufApp). So in contrast to the path package this + // loop has no expensive function calls (except 1x make) + + for r < n { + switch { + case p[r] == '/': + // empty path element, trailing slash is added after the end + r++ + + case p[r] == '.' && r+1 == n: + trailing = true + r++ + + case p[r] == '.' && p[r+1] == '/': + // . element + r++ + + case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): + // .. element: remove to last / + r += 2 + + if w > 1 { + // can backtrack + w-- + + if buf == nil { + for w > 1 && p[w] != '/' { + w-- + } + } else { + for w > 1 && buf[w] != '/' { + w-- + } + } + } + + default: + // real path element. + // add slash if needed + if w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + // copy element + for r < n && p[r] != '/' { + bufApp(&buf, p, w, p[r]) + w++ + r++ + } + } + } + + // re-append trailing slash + if trailing && w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + if buf == nil { + return p[:w] + } + return string(buf[:w]) +} + +// internal helper to lazily create a buffer if necessary. +func bufApp(buf *[]byte, s string, w int, c byte) { + if *buf == nil { + if s[w] == c { + return + } + + *buf = make([]byte, len(s)) + copy(*buf, s[:w]) + } + (*buf)[w] = c +} diff --git a/vendor/github.com/gin-gonic/gin/path_test.go b/vendor/github.com/gin-gonic/gin/path_test.go new file mode 100644 index 000000000..4a6d945b6 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/path_test.go @@ -0,0 +1,88 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Based on the path package, Copyright 2009 The Go Authors. +// Use of this source code is governed by a BSD-style license that can be found +// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE + +package gin + +import ( + "runtime" + "testing" + + "github.com/stretchr/testify/assert" +) + +var cleanTests = []struct { + path, result string +}{ + // Already clean + {"/", "/"}, + {"/abc", "/abc"}, + {"/a/b/c", "/a/b/c"}, + {"/abc/", "/abc/"}, + {"/a/b/c/", "/a/b/c/"}, + + // missing root + {"", "/"}, + {"abc", "/abc"}, + {"abc/def", "/abc/def"}, + {"a/b/c", "/a/b/c"}, + + // Remove doubled slash + {"//", "/"}, + {"/abc//", "/abc/"}, + {"/abc/def//", "/abc/def/"}, + {"/a/b/c//", "/a/b/c/"}, + {"/abc//def//ghi", "/abc/def/ghi"}, + {"//abc", "/abc"}, + {"///abc", "/abc"}, + {"//abc//", "/abc/"}, + + // Remove . elements + {".", "/"}, + {"./", "/"}, + {"/abc/./def", "/abc/def"}, + {"/./abc/def", "/abc/def"}, + {"/abc/.", "/abc/"}, + + // Remove .. 
elements + {"..", "/"}, + {"../", "/"}, + {"../../", "/"}, + {"../..", "/"}, + {"../../abc", "/abc"}, + {"/abc/def/ghi/../jkl", "/abc/def/jkl"}, + {"/abc/def/../ghi/../jkl", "/abc/jkl"}, + {"/abc/def/..", "/abc"}, + {"/abc/def/../..", "/"}, + {"/abc/def/../../..", "/"}, + {"/abc/def/../../..", "/"}, + {"/abc/def/../../../ghi/jkl/../../../mno", "/mno"}, + + // Combinations + {"abc/./../def", "/def"}, + {"abc//./../def", "/def"}, + {"abc/../../././../def", "/def"}, +} + +func TestPathClean(t *testing.T) { + for _, test := range cleanTests { + assert.Equal(t, test.result, cleanPath(test.path)) + assert.Equal(t, test.result, cleanPath(test.result)) + } +} + +func TestPathCleanMallocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping malloc count in short mode") + } + if runtime.GOMAXPROCS(0) > 1 { + t.Log("skipping AllocsPerRun checks; GOMAXPROCS>1") + return + } + + for _, test := range cleanTests { + allocs := testing.AllocsPerRun(100, func() { cleanPath(test.result) }) + assert.EqualValues(t, allocs, 0) + } +} diff --git a/vendor/github.com/gin-gonic/gin/recovery.go b/vendor/github.com/gin-gonic/gin/recovery.go new file mode 100644 index 000000000..89b39fecd --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/recovery.go @@ -0,0 +1,109 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http/httputil" + "runtime" +) + +var ( + dunno = []byte("???") + centerDot = []byte("·") + dot = []byte(".") + slash = []byte("/") +) + +// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one. +func Recovery() HandlerFunc { + return RecoveryWithWriter(DefaultErrorWriter) +} + +// RecoveryWithWriter returns a middleware for a given writer that recovers from any panics and writes a 500 if there was one. +func RecoveryWithWriter(out io.Writer) HandlerFunc { + var logger *log.Logger + if out != nil { + logger = log.New(out, "\n\n\x1b[31m", log.LstdFlags) + } + return func(c *Context) { + defer func() { + if err := recover(); err != nil { + if logger != nil { + stack := stack(3) + httprequest, _ := httputil.DumpRequest(c.Request, false) + logger.Printf("[Recovery] panic recovered:\n%s\n%s\n%s%s", string(httprequest), err, stack, reset) + } + c.AbortWithStatus(500) + } + }() + c.Next() + } +} + +// stack returns a nicely formatted stack frame, skipping skip frames. +func stack(skip int) []byte { + buf := new(bytes.Buffer) // the returned data + // As we loop, we open files and read them. These variables record the currently + // loaded file. + var lines [][]byte + var lastFile string + for i := skip; ; i++ { // Skip the expected number of frames + pc, file, line, ok := runtime.Caller(i) + if !ok { + break + } + // Print this much at least. If we can't find the source, it won't show. + fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc) + if file != lastFile { + data, err := ioutil.ReadFile(file) + if err != nil { + continue + } + lines = bytes.Split(data, []byte{'\n'}) + lastFile = file + } + fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line)) + } + return buf.Bytes() +} + +// source returns a space-trimmed slice of the n'th line. 
+func source(lines [][]byte, n int) []byte { + n-- // in stack trace, lines are 1-indexed but our array is 0-indexed + if n < 0 || n >= len(lines) { + return dunno + } + return bytes.TrimSpace(lines[n]) +} + +// function returns, if possible, the name of the function containing the PC. +func function(pc uintptr) []byte { + fn := runtime.FuncForPC(pc) + if fn == nil { + return dunno + } + name := []byte(fn.Name()) + // The name includes the path name to the package, which is unnecessary + // since the file name is already included. Plus, it has center dots. + // That is, we see + // runtime/debug.*T·ptrmethod + // and want + // *T.ptrmethod + // Also the package path might contains dot (e.g. code.google.com/...), + // so first eliminate the path prefix + if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 { + name = name[lastslash+1:] + } + if period := bytes.Index(name, dot); period >= 0 { + name = name[period+1:] + } + name = bytes.Replace(name, centerDot, dot, -1) + return name +} diff --git a/vendor/github.com/gin-gonic/gin/recovery_test.go b/vendor/github.com/gin-gonic/gin/recovery_test.go new file mode 100644 index 000000000..de3b62a50 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/recovery_test.go @@ -0,0 +1,43 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestPanicInHandler assert that panic has been recovered. +func TestPanicInHandler(t *testing.T) { + buffer := new(bytes.Buffer) + router := New() + router.Use(RecoveryWithWriter(buffer)) + router.GET("/recovery", func(_ *Context) { + panic("Oupps, Houston, we have a problem") + }) + // RUN + w := performRequest(router, "GET", "/recovery") + // TEST + assert.Equal(t, 500, w.Code) + assert.Contains(t, buffer.String(), "GET /recovery") + assert.Contains(t, buffer.String(), "Oupps, Houston, we have a problem") + assert.Contains(t, buffer.String(), "TestPanicInHandler") +} + +// TestPanicWithAbort assert that panic has been recovered even if context.Abort was used. +func TestPanicWithAbort(t *testing.T) { + router := New() + router.Use(RecoveryWithWriter(nil)) + router.GET("/recovery", func(c *Context) { + c.AbortWithStatus(400) + panic("Oupps, Houston, we have a problem") + }) + // RUN + w := performRequest(router, "GET", "/recovery") + // TEST + assert.Equal(t, 400, w.Code) +} diff --git a/vendor/github.com/gin-gonic/gin/render/data.go b/vendor/github.com/gin-gonic/gin/render/data.go new file mode 100644 index 000000000..331949139 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/render/data.go @@ -0,0 +1,23 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import "net/http" + +type Data struct { + ContentType string + Data []byte +} + +// Render (Data) writes data with custom ContentType. 
+func (r Data) Render(w http.ResponseWriter) (err error) { + r.WriteContentType(w) + _, err = w.Write(r.Data) + return +} + +func (r Data) WriteContentType(w http.ResponseWriter) { + writeContentType(w, []string{r.ContentType}) +} diff --git a/vendor/github.com/gin-gonic/gin/render/html.go b/vendor/github.com/gin-gonic/gin/render/html.go new file mode 100644 index 000000000..1e3be65b9 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/render/html.go @@ -0,0 +1,80 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "html/template" + "net/http" +) + +type Delims struct { + Left string + Right string +} + +type HTMLRender interface { + Instance(string, interface{}) Render +} + +type HTMLProduction struct { + Template *template.Template + Delims Delims +} + +type HTMLDebug struct { + Files []string + Glob string + Delims Delims + FuncMap template.FuncMap +} + +type HTML struct { + Template *template.Template + Name string + Data interface{} +} + +var htmlContentType = []string{"text/html; charset=utf-8"} + +func (r HTMLProduction) Instance(name string, data interface{}) Render { + return HTML{ + Template: r.Template, + Name: name, + Data: data, + } +} + +func (r HTMLDebug) Instance(name string, data interface{}) Render { + return HTML{ + Template: r.loadTemplate(), + Name: name, + Data: data, + } +} +func (r HTMLDebug) loadTemplate() *template.Template { + if r.FuncMap == nil { + r.FuncMap = template.FuncMap{} + } + if len(r.Files) > 0 { + return template.Must(template.New("").Delims(r.Delims.Left, r.Delims.Right).Funcs(r.FuncMap).ParseFiles(r.Files...)) + } + if r.Glob != "" { + return template.Must(template.New("").Delims(r.Delims.Left, r.Delims.Right).Funcs(r.FuncMap).ParseGlob(r.Glob)) + } + panic("the HTML debug render was created without files or glob pattern") +} + +func (r HTML) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + + if r.Name == "" { + return r.Template.Execute(w, r.Data) + } + return r.Template.ExecuteTemplate(w, r.Name, r.Data) +} + +func (r HTML) WriteContentType(w http.ResponseWriter) { + writeContentType(w, htmlContentType) +} diff --git a/vendor/github.com/gin-gonic/gin/render/json.go b/vendor/github.com/gin-gonic/gin/render/json.go new file mode 100644 index 000000000..eb2548e2a --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/render/json.go @@ -0,0 +1,82 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
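The `HTMLDebug`/`HTMLProduction` renderers above are normally driven through the engine's template helpers rather than used directly. A rough usage sketch, assuming a `templates/hello.tmpl` file that uses the same `{[{ }]}` delimiters as the vendored tests (the glob, template name and port are illustrative):

```go
package main

import "github.com/gin-gonic/gin"

func main() {
	r := gin.Default()

	// Custom delimiters matching Delims{Left: "{[{", Right: "}]}"} from the tests.
	r.Delims("{[{", "}]}")
	r.LoadHTMLGlob("templates/*.tmpl") // illustrative path

	r.GET("/hello", func(c *gin.Context) {
		// Renders templates/hello.tmpl with {[{ .name }]} substituted.
		c.HTML(200, "hello.tmpl", gin.H{"name": "thinkerou"})
	})

	r.Run(":8080") // illustrative address
}
```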
+ +package render + +import ( + "bytes" + "net/http" + + "github.com/gin-gonic/gin/json" +) + +type JSON struct { + Data interface{} +} + +type IndentedJSON struct { + Data interface{} +} + +type SecureJSON struct { + Prefix string + Data interface{} +} + +type SecureJSONPrefix string + +var jsonContentType = []string{"application/json; charset=utf-8"} + +func (r JSON) Render(w http.ResponseWriter) (err error) { + if err = WriteJSON(w, r.Data); err != nil { + panic(err) + } + return +} + +func (r JSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} + +func WriteJSON(w http.ResponseWriter, obj interface{}) error { + writeContentType(w, jsonContentType) + jsonBytes, err := json.Marshal(obj) + if err != nil { + return err + } + w.Write(jsonBytes) + return nil +} + +func (r IndentedJSON) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + jsonBytes, err := json.MarshalIndent(r.Data, "", " ") + if err != nil { + return err + } + w.Write(jsonBytes) + return nil +} + +func (r IndentedJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} + +func (r SecureJSON) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + jsonBytes, err := json.Marshal(r.Data) + if err != nil { + return err + } + // if the jsonBytes is array values + if bytes.HasPrefix(jsonBytes, []byte("[")) && bytes.HasSuffix(jsonBytes, []byte("]")) { + w.Write([]byte(r.Prefix)) + } + w.Write(jsonBytes) + return nil +} + +func (r SecureJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} diff --git a/vendor/github.com/gin-gonic/gin/render/msgpack.go b/vendor/github.com/gin-gonic/gin/render/msgpack.go new file mode 100644 index 000000000..e6c13e588 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/render/msgpack.go @@ -0,0 +1,31 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "net/http" + + "github.com/ugorji/go/codec" +) + +type MsgPack struct { + Data interface{} +} + +var msgpackContentType = []string{"application/msgpack; charset=utf-8"} + +func (r MsgPack) WriteContentType(w http.ResponseWriter) { + writeContentType(w, msgpackContentType) +} + +func (r MsgPack) Render(w http.ResponseWriter) error { + return WriteMsgPack(w, r.Data) +} + +func WriteMsgPack(w http.ResponseWriter, obj interface{}) error { + writeContentType(w, msgpackContentType) + var h codec.Handle = new(codec.MsgpackHandle) + return codec.NewEncoder(w, h).Encode(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/render/redirect.go b/vendor/github.com/gin-gonic/gin/render/redirect.go new file mode 100644 index 000000000..f874a3515 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/render/redirect.go @@ -0,0 +1,26 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
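`Context` exposes shortcuts for the JSON renderers defined above; `SecureJSON` only prepends its prefix when the payload is a JSON array, which blunts JSON-hijacking of array responses. A hedged sketch (routes, payloads and the `while(1);` prefix are illustrative):

```go
package main

import (
	"github.com/gin-gonic/gin"
	"github.com/gin-gonic/gin/render"
)

func main() {
	r := gin.Default()

	r.GET("/compact", func(c *gin.Context) {
		c.JSON(200, gin.H{"foo": "bar"}) // render.JSON under the hood
	})

	r.GET("/pretty", func(c *gin.Context) {
		c.IndentedJSON(200, gin.H{"foo": "bar"}) // render.IndentedJSON
	})

	r.GET("/names", func(c *gin.Context) {
		// The prefix is written only because the data marshals to an array.
		c.Render(200, render.SecureJSON{Prefix: "while(1);", Data: []string{"a", "b"}})
	})

	r.Run(":8080") // illustrative address
}
```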
+ +package render + +import ( + "fmt" + "net/http" +) + +type Redirect struct { + Code int + Request *http.Request + Location string +} + +func (r Redirect) Render(w http.ResponseWriter) error { + if (r.Code < 300 || r.Code > 308) && r.Code != 201 { + panic(fmt.Sprintf("Cannot redirect with status code %d", r.Code)) + } + http.Redirect(w, r.Request, r.Location, r.Code) + return nil +} + +func (r Redirect) WriteContentType(http.ResponseWriter) {} diff --git a/vendor/github.com/gin-gonic/gin/render/render.go b/vendor/github.com/gin-gonic/gin/render/render.go new file mode 100644 index 000000000..718523642 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/render/render.go @@ -0,0 +1,34 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import "net/http" + +type Render interface { + Render(http.ResponseWriter) error + WriteContentType(w http.ResponseWriter) +} + +var ( + _ Render = JSON{} + _ Render = IndentedJSON{} + _ Render = SecureJSON{} + _ Render = XML{} + _ Render = String{} + _ Render = Redirect{} + _ Render = Data{} + _ Render = HTML{} + _ HTMLRender = HTMLDebug{} + _ HTMLRender = HTMLProduction{} + _ Render = YAML{} + _ Render = MsgPack{} +) + +func writeContentType(w http.ResponseWriter, value []string) { + header := w.Header() + if val := header["Content-Type"]; len(val) == 0 { + header["Content-Type"] = value + } +} diff --git a/vendor/github.com/gin-gonic/gin/render/render_test.go b/vendor/github.com/gin-gonic/gin/render/render_test.go new file mode 100644 index 000000000..530e222ac --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/render/render_test.go @@ -0,0 +1,349 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
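Any type satisfying the two-method `Render` interface above can be handed to `Context.Render`. A sketch of a hypothetical CSV renderer, not part of gin (the type name, content type and data are made up for illustration):

```go
package main

import (
	"encoding/csv"
	"net/http"

	"github.com/gin-gonic/gin"
)

// CSV is a hypothetical renderer implementing gin's render.Render interface:
// Render(http.ResponseWriter) error and WriteContentType(http.ResponseWriter).
type CSV struct {
	Records [][]string
}

func (r CSV) Render(w http.ResponseWriter) error {
	r.WriteContentType(w)
	return csv.NewWriter(w).WriteAll(r.Records)
}

func (r CSV) WriteContentType(w http.ResponseWriter) {
	// Mirrors render.writeContentType: only set the header if it is still empty.
	if h := w.Header(); len(h["Content-Type"]) == 0 {
		h["Content-Type"] = []string{"text/csv; charset=utf-8"}
	}
}

func main() {
	r := gin.Default()
	r.GET("/report.csv", func(c *gin.Context) {
		c.Render(200, CSV{Records: [][]string{{"id", "name"}, {"1", "manu"}}})
	})
	r.Run(":8080") // illustrative address
}
```

The built-in renders in this package follow the same shape: set the content type, then write the encoded body.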
+ +package render + +import ( + "bytes" + "encoding/xml" + "errors" + "html/template" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/ugorji/go/codec" +) + +// TODO unit tests +// test errors + +func TestRenderMsgPack(t *testing.T) { + w := httptest.NewRecorder() + data := map[string]interface{}{ + "foo": "bar", + } + + (MsgPack{data}).WriteContentType(w) + assert.Equal(t, w.Header().Get("Content-Type"), "application/msgpack; charset=utf-8") + + err := (MsgPack{data}).Render(w) + + assert.NoError(t, err) + + h := new(codec.MsgpackHandle) + assert.NotNil(t, h) + buf := bytes.NewBuffer([]byte{}) + assert.NotNil(t, buf) + err = codec.NewEncoder(buf, h).Encode(data) + + assert.NoError(t, err) + assert.Equal(t, w.Body.String(), string(buf.Bytes())) + assert.Equal(t, w.Header().Get("Content-Type"), "application/msgpack; charset=utf-8") +} + +func TestRenderJSON(t *testing.T) { + w := httptest.NewRecorder() + data := map[string]interface{}{ + "foo": "bar", + } + + (JSON{data}).WriteContentType(w) + assert.Equal(t, "application/json; charset=utf-8", w.Header().Get("Content-Type")) + + err := (JSON{data}).Render(w) + + assert.NoError(t, err) + assert.Equal(t, "{\"foo\":\"bar\"}", w.Body.String()) + assert.Equal(t, "application/json; charset=utf-8", w.Header().Get("Content-Type")) +} + +func TestRenderJSONPanics(t *testing.T) { + w := httptest.NewRecorder() + data := make(chan int) + + // json: unsupported type: chan int + assert.Panics(t, func() { (JSON{data}).Render(w) }) +} + +func TestRenderIndentedJSON(t *testing.T) { + w := httptest.NewRecorder() + data := map[string]interface{}{ + "foo": "bar", + "bar": "foo", + } + + err := (IndentedJSON{data}).Render(w) + + assert.NoError(t, err) + assert.Equal(t, w.Body.String(), "{\n \"bar\": \"foo\",\n \"foo\": \"bar\"\n}") + assert.Equal(t, w.Header().Get("Content-Type"), "application/json; charset=utf-8") +} + +func TestRenderIndentedJSONPanics(t *testing.T) { + w := httptest.NewRecorder() + data := make(chan int) + + // json: unsupported type: chan int + err := (IndentedJSON{data}).Render(w) + assert.Error(t, err) +} + +func TestRenderSecureJSON(t *testing.T) { + w1 := httptest.NewRecorder() + data := map[string]interface{}{ + "foo": "bar", + } + + (SecureJSON{"while(1);", data}).WriteContentType(w1) + assert.Equal(t, "application/json; charset=utf-8", w1.Header().Get("Content-Type")) + + err1 := (SecureJSON{"while(1);", data}).Render(w1) + + assert.NoError(t, err1) + assert.Equal(t, "{\"foo\":\"bar\"}", w1.Body.String()) + assert.Equal(t, "application/json; charset=utf-8", w1.Header().Get("Content-Type")) + + w2 := httptest.NewRecorder() + datas := []map[string]interface{}{{ + "foo": "bar", + }, { + "bar": "foo", + }} + + err2 := (SecureJSON{"while(1);", datas}).Render(w2) + assert.NoError(t, err2) + assert.Equal(t, "while(1);[{\"foo\":\"bar\"},{\"bar\":\"foo\"}]", w2.Body.String()) + assert.Equal(t, "application/json; charset=utf-8", w2.Header().Get("Content-Type")) +} + +func TestRenderSecureJSONFail(t *testing.T) { + w := httptest.NewRecorder() + data := make(chan int) + + // json: unsupported type: chan int + err := (SecureJSON{"while(1);", data}).Render(w) + assert.Error(t, err) +} + +type xmlmap map[string]interface{} + +// Allows type H to be used with xml.Marshal +func (h xmlmap) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name = xml.Name{ + Space: "", + Local: "map", + } + if err := e.EncodeToken(start); err != nil { + return err + } + for key, value := range h { + 
elem := xml.StartElement{ + Name: xml.Name{Space: "", Local: key}, + Attr: []xml.Attr{}, + } + if err := e.EncodeElement(value, elem); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +func TestRenderYAML(t *testing.T) { + w := httptest.NewRecorder() + data := ` +a : Easy! +b: + c: 2 + d: [3, 4] + ` + (YAML{data}).WriteContentType(w) + assert.Equal(t, w.Header().Get("Content-Type"), "application/x-yaml; charset=utf-8") + + err := (YAML{data}).Render(w) + assert.NoError(t, err) + assert.Equal(t, w.Body.String(), "\"\\na : Easy!\\nb:\\n\\tc: 2\\n\\td: [3, 4]\\n\\t\"\n") + assert.Equal(t, w.Header().Get("Content-Type"), "application/x-yaml; charset=utf-8") +} + +type fail struct{} + +// Hook MarshalYAML +func (ft *fail) MarshalYAML() (interface{}, error) { + return nil, errors.New("fail") +} + +func TestRenderYAMLFail(t *testing.T) { + w := httptest.NewRecorder() + err := (YAML{&fail{}}).Render(w) + assert.Error(t, err) +} + +func TestRenderXML(t *testing.T) { + w := httptest.NewRecorder() + data := xmlmap{ + "foo": "bar", + } + + (XML{data}).WriteContentType(w) + assert.Equal(t, w.Header().Get("Content-Type"), "application/xml; charset=utf-8") + + err := (XML{data}).Render(w) + + assert.NoError(t, err) + assert.Equal(t, w.Body.String(), "bar") + assert.Equal(t, w.Header().Get("Content-Type"), "application/xml; charset=utf-8") +} + +func TestRenderRedirect(t *testing.T) { + req, err := http.NewRequest("GET", "/test-redirect", nil) + assert.NoError(t, err) + + data1 := Redirect{ + Code: 301, + Request: req, + Location: "/new/location", + } + + w := httptest.NewRecorder() + err = data1.Render(w) + assert.NoError(t, err) + + data2 := Redirect{ + Code: 200, + Request: req, + Location: "/new/location", + } + + w = httptest.NewRecorder() + assert.Panics(t, func() { data2.Render(w) }) + + // only improve coverage + data2.WriteContentType(w) +} + +func TestRenderData(t *testing.T) { + w := httptest.NewRecorder() + data := []byte("#!PNG some raw data") + + err := (Data{ + ContentType: "image/png", + Data: data, + }).Render(w) + + assert.NoError(t, err) + assert.Equal(t, w.Body.String(), "#!PNG some raw data") + assert.Equal(t, w.Header().Get("Content-Type"), "image/png") +} + +func TestRenderString(t *testing.T) { + w := httptest.NewRecorder() + + (String{ + Format: "hello %s %d", + Data: []interface{}{}, + }).WriteContentType(w) + assert.Equal(t, w.Header().Get("Content-Type"), "text/plain; charset=utf-8") + + err := (String{ + Format: "hola %s %d", + Data: []interface{}{"manu", 2}, + }).Render(w) + + assert.NoError(t, err) + assert.Equal(t, w.Body.String(), "hola manu 2") + assert.Equal(t, w.Header().Get("Content-Type"), "text/plain; charset=utf-8") +} + +func TestRenderStringLenZero(t *testing.T) { + w := httptest.NewRecorder() + + err := (String{ + Format: "hola %s %d", + Data: []interface{}{}, + }).Render(w) + + assert.NoError(t, err) + assert.Equal(t, w.Body.String(), "hola %s %d") + assert.Equal(t, w.Header().Get("Content-Type"), "text/plain; charset=utf-8") +} + +func TestRenderHTMLTemplate(t *testing.T) { + w := httptest.NewRecorder() + templ := template.Must(template.New("t").Parse(`Hello {{.name}}`)) + + htmlRender := HTMLProduction{Template: templ} + instance := htmlRender.Instance("t", map[string]interface{}{ + "name": "alexandernyquist", + }) + + err := instance.Render(w) + + assert.NoError(t, err) + assert.Equal(t, w.Body.String(), "Hello alexandernyquist") + assert.Equal(t, w.Header().Get("Content-Type"), "text/html; charset=utf-8") +} + 
+func TestRenderHTMLTemplateEmptyName(t *testing.T) { + w := httptest.NewRecorder() + templ := template.Must(template.New("").Parse(`Hello {{.name}}`)) + + htmlRender := HTMLProduction{Template: templ} + instance := htmlRender.Instance("", map[string]interface{}{ + "name": "alexandernyquist", + }) + + err := instance.Render(w) + + assert.NoError(t, err) + assert.Equal(t, w.Body.String(), "Hello alexandernyquist") + assert.Equal(t, w.Header().Get("Content-Type"), "text/html; charset=utf-8") +} + +func TestRenderHTMLDebugFiles(t *testing.T) { + w := httptest.NewRecorder() + htmlRender := HTMLDebug{Files: []string{"../fixtures/basic/hello.tmpl"}, + Glob: "", + Delims: Delims{Left: "{[{", Right: "}]}"}, + FuncMap: nil, + } + instance := htmlRender.Instance("hello.tmpl", map[string]interface{}{ + "name": "thinkerou", + }) + + err := instance.Render(w) + + assert.NoError(t, err) + assert.Equal(t, w.Body.String(), "

<h1>Hello thinkerou</h1>") + assert.Equal(t, w.Header().Get("Content-Type"), "text/html; charset=utf-8") +} + +func TestRenderHTMLDebugGlob(t *testing.T) { + w := httptest.NewRecorder() + htmlRender := HTMLDebug{Files: nil, + Glob: "../fixtures/basic/hello*", + Delims: Delims{Left: "{[{", Right: "}]}"}, + FuncMap: nil, + } + instance := htmlRender.Instance("hello.tmpl", map[string]interface{}{ + "name": "thinkerou", + }) + + err := instance.Render(w) + + assert.NoError(t, err) + assert.Equal(t, w.Body.String(), "<h1>Hello thinkerou</h1>
") + assert.Equal(t, w.Header().Get("Content-Type"), "text/html; charset=utf-8") +} + +func TestRenderHTMLDebugPanics(t *testing.T) { + htmlRender := HTMLDebug{Files: nil, + Glob: "", + Delims: Delims{"{{", "}}"}, + FuncMap: nil, + } + assert.Panics(t, func() { htmlRender.Instance("", nil) }) +} diff --git a/vendor/github.com/gin-gonic/gin/render/text.go b/vendor/github.com/gin-gonic/gin/render/text.go new file mode 100644 index 000000000..74cd26bee --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/render/text.go @@ -0,0 +1,36 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "fmt" + "io" + "net/http" +) + +type String struct { + Format string + Data []interface{} +} + +var plainContentType = []string{"text/plain; charset=utf-8"} + +func (r String) Render(w http.ResponseWriter) error { + WriteString(w, r.Format, r.Data) + return nil +} + +func (r String) WriteContentType(w http.ResponseWriter) { + writeContentType(w, plainContentType) +} + +func WriteString(w http.ResponseWriter, format string, data []interface{}) { + writeContentType(w, plainContentType) + if len(data) > 0 { + fmt.Fprintf(w, format, data...) + } else { + io.WriteString(w, format) + } +} diff --git a/vendor/github.com/gin-gonic/gin/render/xml.go b/vendor/github.com/gin-gonic/gin/render/xml.go new file mode 100644 index 000000000..cff1ac3e8 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/render/xml.go @@ -0,0 +1,25 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "encoding/xml" + "net/http" +) + +type XML struct { + Data interface{} +} + +var xmlContentType = []string{"application/xml; charset=utf-8"} + +func (r XML) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + return xml.NewEncoder(w).Encode(r.Data) +} + +func (r XML) WriteContentType(w http.ResponseWriter) { + writeContentType(w, xmlContentType) +} diff --git a/vendor/github.com/gin-gonic/gin/render/yaml.go b/vendor/github.com/gin-gonic/gin/render/yaml.go new file mode 100644 index 000000000..25d0ebd47 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/render/yaml.go @@ -0,0 +1,33 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "net/http" + + "gopkg.in/yaml.v2" +) + +type YAML struct { + Data interface{} +} + +var yamlContentType = []string{"application/x-yaml; charset=utf-8"} + +func (r YAML) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + + bytes, err := yaml.Marshal(r.Data) + if err != nil { + return err + } + + w.Write(bytes) + return nil +} + +func (r YAML) WriteContentType(w http.ResponseWriter) { + writeContentType(w, yamlContentType) +} diff --git a/vendor/github.com/gin-gonic/gin/response_writer.go b/vendor/github.com/gin-gonic/gin/response_writer.go new file mode 100644 index 000000000..232f00aac --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/response_writer.go @@ -0,0 +1,114 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package gin + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + noWritten = -1 + defaultStatus = 200 +) + +type ResponseWriter interface { + http.ResponseWriter + http.Hijacker + http.Flusher + http.CloseNotifier + + // Returns the HTTP response status code of the current request. + Status() int + + // Returns the number of bytes already written into the response http body. + // See Written() + Size() int + + // Writes the string into the response body. + WriteString(string) (int, error) + + // Returns true if the response body was already written. + Written() bool + + // Forces to write the http header (status code + headers). + WriteHeaderNow() +} + +type responseWriter struct { + http.ResponseWriter + size int + status int +} + +var _ ResponseWriter = &responseWriter{} + +func (w *responseWriter) reset(writer http.ResponseWriter) { + w.ResponseWriter = writer + w.size = noWritten + w.status = defaultStatus +} + +func (w *responseWriter) WriteHeader(code int) { + if code > 0 && w.status != code { + if w.Written() { + debugPrint("[WARNING] Headers were already written. Wanted to override status code %d with %d", w.status, code) + } + w.status = code + } +} + +func (w *responseWriter) WriteHeaderNow() { + if !w.Written() { + w.size = 0 + w.ResponseWriter.WriteHeader(w.status) + } +} + +func (w *responseWriter) Write(data []byte) (n int, err error) { + w.WriteHeaderNow() + n, err = w.ResponseWriter.Write(data) + w.size += n + return +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + w.WriteHeaderNow() + n, err = io.WriteString(w.ResponseWriter, s) + w.size += n + return +} + +func (w *responseWriter) Status() int { + return w.status +} + +func (w *responseWriter) Size() int { + return w.size +} + +func (w *responseWriter) Written() bool { + return w.size != noWritten +} + +// Hijack implements the http.Hijacker interface. +func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if w.size < 0 { + w.size = 0 + } + return w.ResponseWriter.(http.Hijacker).Hijack() +} + +// CloseNotify implements the http.CloseNotify interface. +func (w *responseWriter) CloseNotify() <-chan bool { + return w.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +// Flush implements the http.Flush interface. +func (w *responseWriter) Flush() { + w.ResponseWriter.(http.Flusher).Flush() +} diff --git a/vendor/github.com/gin-gonic/gin/response_writer_test.go b/vendor/github.com/gin-gonic/gin/response_writer_test.go new file mode 100644 index 000000000..cec27338d --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/response_writer_test.go @@ -0,0 +1,115 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package gin + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +// TODO +// func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { +// func (w *responseWriter) CloseNotify() <-chan bool { +// func (w *responseWriter) Flush() { + +var _ ResponseWriter = &responseWriter{} +var _ http.ResponseWriter = &responseWriter{} +var _ http.ResponseWriter = ResponseWriter(&responseWriter{}) +var _ http.Hijacker = ResponseWriter(&responseWriter{}) +var _ http.Flusher = ResponseWriter(&responseWriter{}) +var _ http.CloseNotifier = ResponseWriter(&responseWriter{}) + +func init() { + SetMode(TestMode) +} + +func TestResponseWriterReset(t *testing.T) { + testWritter := httptest.NewRecorder() + writer := &responseWriter{} + var w ResponseWriter = writer + + writer.reset(testWritter) + assert.Equal(t, -1, writer.size) + assert.Equal(t, 200, writer.status) + assert.Equal(t, testWritter, writer.ResponseWriter) + assert.Equal(t, -1, w.Size()) + assert.Equal(t, 200, w.Status()) + assert.False(t, w.Written()) +} + +func TestResponseWriterWriteHeader(t *testing.T) { + testWritter := httptest.NewRecorder() + writer := &responseWriter{} + writer.reset(testWritter) + w := ResponseWriter(writer) + + w.WriteHeader(300) + assert.False(t, w.Written()) + assert.Equal(t, 300, w.Status()) + assert.NotEqual(t, testWritter.Code, 300) + + w.WriteHeader(-1) + assert.Equal(t, 300, w.Status()) +} + +func TestResponseWriterWriteHeadersNow(t *testing.T) { + testWritter := httptest.NewRecorder() + writer := &responseWriter{} + writer.reset(testWritter) + w := ResponseWriter(writer) + + w.WriteHeader(300) + w.WriteHeaderNow() + + assert.True(t, w.Written()) + assert.Equal(t, 0, w.Size()) + assert.Equal(t, 300, testWritter.Code) + + writer.size = 10 + w.WriteHeaderNow() + assert.Equal(t, 10, w.Size()) +} + +func TestResponseWriterWrite(t *testing.T) { + testWritter := httptest.NewRecorder() + writer := &responseWriter{} + writer.reset(testWritter) + w := ResponseWriter(writer) + + n, err := w.Write([]byte("hola")) + assert.Equal(t, 4, n) + assert.Equal(t, 4, w.Size()) + assert.Equal(t, 200, w.Status()) + assert.Equal(t, 200, testWritter.Code) + assert.Equal(t, "hola", testWritter.Body.String()) + assert.NoError(t, err) + + n, err = w.Write([]byte(" adios")) + assert.Equal(t, 6, n) + assert.Equal(t, 10, w.Size()) + assert.Equal(t, "hola adios", testWritter.Body.String()) + assert.NoError(t, err) +} + +func TestResponseWriterHijack(t *testing.T) { + testWritter := httptest.NewRecorder() + writer := &responseWriter{} + writer.reset(testWritter) + w := ResponseWriter(writer) + + assert.Panics(t, func() { + w.Hijack() + }) + assert.True(t, w.Written()) + + assert.Panics(t, func() { + w.CloseNotify() + }) + + w.Flush() +} diff --git a/vendor/github.com/gin-gonic/gin/routergroup.go b/vendor/github.com/gin-gonic/gin/routergroup.go new file mode 100644 index 000000000..5e681c1cb --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/routergroup.go @@ -0,0 +1,213 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
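The `ResponseWriter` wrapper above is what handlers see as `c.Writer`, which is why middleware can read the status code and body size after `c.Next()` has run the rest of the chain. A sketch of that pattern (the log format and route are illustrative):

```go
package main

import (
	"log"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.New()

	// After c.Next() the downstream handlers have finished, so the wrapped
	// ResponseWriter already knows the status code and bytes written.
	r.Use(func(c *gin.Context) {
		c.Next()
		log.Printf("%s -> status=%d size=%dB written=%t",
			c.Request.URL.Path, c.Writer.Status(), c.Writer.Size(), c.Writer.Written())
	})

	r.GET("/ping", func(c *gin.Context) { c.String(200, "pong") })

	r.Run(":8080") // illustrative address
}
```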
+ +package gin + +import ( + "net/http" + "path" + "regexp" + "strings" +) + +type IRouter interface { + IRoutes + Group(string, ...HandlerFunc) *RouterGroup +} + +type IRoutes interface { + Use(...HandlerFunc) IRoutes + + Handle(string, string, ...HandlerFunc) IRoutes + Any(string, ...HandlerFunc) IRoutes + GET(string, ...HandlerFunc) IRoutes + POST(string, ...HandlerFunc) IRoutes + DELETE(string, ...HandlerFunc) IRoutes + PATCH(string, ...HandlerFunc) IRoutes + PUT(string, ...HandlerFunc) IRoutes + OPTIONS(string, ...HandlerFunc) IRoutes + HEAD(string, ...HandlerFunc) IRoutes + + StaticFile(string, string) IRoutes + Static(string, string) IRoutes + StaticFS(string, http.FileSystem) IRoutes +} + +// RouterGroup is used internally to configure router, a RouterGroup is associated with a prefix +// and an array of handlers (middleware). +type RouterGroup struct { + Handlers HandlersChain + basePath string + engine *Engine + root bool +} + +var _ IRouter = &RouterGroup{} + +// Use adds middleware to the group, see example code in github. +func (group *RouterGroup) Use(middleware ...HandlerFunc) IRoutes { + group.Handlers = append(group.Handlers, middleware...) + return group.returnObj() +} + +// Group creates a new router group. You should add all the routes that have common middlwares or the same path prefix. +// For example, all the routes that use a common middlware for authorization could be grouped. +func (group *RouterGroup) Group(relativePath string, handlers ...HandlerFunc) *RouterGroup { + return &RouterGroup{ + Handlers: group.combineHandlers(handlers), + basePath: group.calculateAbsolutePath(relativePath), + engine: group.engine, + } +} + +func (group *RouterGroup) BasePath() string { + return group.basePath +} + +func (group *RouterGroup) handle(httpMethod, relativePath string, handlers HandlersChain) IRoutes { + absolutePath := group.calculateAbsolutePath(relativePath) + handlers = group.combineHandlers(handlers) + group.engine.addRoute(httpMethod, absolutePath, handlers) + return group.returnObj() +} + +// Handle registers a new request handle and middleware with the given path and method. +// The last handler should be the real handler, the other ones should be middleware that can and should be shared among different routes. +// See the example code in github. +// +// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut +// functions can be used. +// +// This function is intended for bulk loading and to allow the usage of less +// frequently used, non-standardized or custom methods (e.g. for internal +// communication with a proxy). +func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...HandlerFunc) IRoutes { + if matches, err := regexp.MatchString("^[A-Z]+$", httpMethod); !matches || err != nil { + panic("http method " + httpMethod + " is not valid") + } + return group.handle(httpMethod, relativePath, handlers) +} + +// POST is a shortcut for router.Handle("POST", path, handle). +func (group *RouterGroup) POST(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("POST", relativePath, handlers) +} + +// GET is a shortcut for router.Handle("GET", path, handle). +func (group *RouterGroup) GET(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("GET", relativePath, handlers) +} + +// DELETE is a shortcut for router.Handle("DELETE", path, handle). 
+func (group *RouterGroup) DELETE(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("DELETE", relativePath, handlers) +} + +// PATCH is a shortcut for router.Handle("PATCH", path, handle). +func (group *RouterGroup) PATCH(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("PATCH", relativePath, handlers) +} + +// PUT is a shortcut for router.Handle("PUT", path, handle). +func (group *RouterGroup) PUT(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("PUT", relativePath, handlers) +} + +// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle). +func (group *RouterGroup) OPTIONS(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("OPTIONS", relativePath, handlers) +} + +// HEAD is a shortcut for router.Handle("HEAD", path, handle). +func (group *RouterGroup) HEAD(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("HEAD", relativePath, handlers) +} + +// Any registers a route that matches all the HTTP methods. +// GET, POST, PUT, PATCH, HEAD, OPTIONS, DELETE, CONNECT, TRACE. +func (group *RouterGroup) Any(relativePath string, handlers ...HandlerFunc) IRoutes { + group.handle("GET", relativePath, handlers) + group.handle("POST", relativePath, handlers) + group.handle("PUT", relativePath, handlers) + group.handle("PATCH", relativePath, handlers) + group.handle("HEAD", relativePath, handlers) + group.handle("OPTIONS", relativePath, handlers) + group.handle("DELETE", relativePath, handlers) + group.handle("CONNECT", relativePath, handlers) + group.handle("TRACE", relativePath, handlers) + return group.returnObj() +} + +// StaticFile registers a single route in order to server a single file of the local filesystem. +// router.StaticFile("favicon.ico", "./resources/favicon.ico") +func (group *RouterGroup) StaticFile(relativePath, filepath string) IRoutes { + if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") { + panic("URL parameters can not be used when serving a static file") + } + handler := func(c *Context) { + c.File(filepath) + } + group.GET(relativePath, handler) + group.HEAD(relativePath, handler) + return group.returnObj() +} + +// Static serves files from the given file system root. +// Internally a http.FileServer is used, therefore http.NotFound is used instead +// of the Router's NotFound handler. +// To use the operating system's file system implementation, +// use : +// router.Static("/static", "/var/www") +func (group *RouterGroup) Static(relativePath, root string) IRoutes { + return group.StaticFS(relativePath, Dir(root, false)) +} + +// StaticFS works just like `Static()` but a custom `http.FileSystem` can be used instead. 
+// Gin by default user: gin.Dir() +func (group *RouterGroup) StaticFS(relativePath string, fs http.FileSystem) IRoutes { + if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") { + panic("URL parameters can not be used when serving a static folder") + } + handler := group.createStaticHandler(relativePath, fs) + urlPattern := path.Join(relativePath, "/*filepath") + + // Register GET and HEAD handlers + group.GET(urlPattern, handler) + group.HEAD(urlPattern, handler) + return group.returnObj() +} + +func (group *RouterGroup) createStaticHandler(relativePath string, fs http.FileSystem) HandlerFunc { + absolutePath := group.calculateAbsolutePath(relativePath) + fileServer := http.StripPrefix(absolutePath, http.FileServer(fs)) + _, nolisting := fs.(*onlyfilesFS) + return func(c *Context) { + if nolisting { + c.Writer.WriteHeader(404) + } + fileServer.ServeHTTP(c.Writer, c.Request) + } +} + +func (group *RouterGroup) combineHandlers(handlers HandlersChain) HandlersChain { + finalSize := len(group.Handlers) + len(handlers) + if finalSize >= int(abortIndex) { + panic("too many handlers") + } + mergedHandlers := make(HandlersChain, finalSize) + copy(mergedHandlers, group.Handlers) + copy(mergedHandlers[len(group.Handlers):], handlers) + return mergedHandlers +} + +func (group *RouterGroup) calculateAbsolutePath(relativePath string) string { + return joinPaths(group.basePath, relativePath) +} + +func (group *RouterGroup) returnObj() IRoutes { + if group.root { + return group.engine + } + return group +} diff --git a/vendor/github.com/gin-gonic/gin/routergroup_test.go b/vendor/github.com/gin-gonic/gin/routergroup_test.go new file mode 100644 index 000000000..a362e23dd --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/routergroup_test.go @@ -0,0 +1,177 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package gin + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func init() { + SetMode(TestMode) +} + +func TestRouterGroupBasic(t *testing.T) { + router := New() + group := router.Group("/hola", func(c *Context) {}) + group.Use(func(c *Context) {}) + + assert.Len(t, group.Handlers, 2) + assert.Equal(t, "/hola", group.BasePath()) + assert.Equal(t, router, group.engine) + + group2 := group.Group("manu") + group2.Use(func(c *Context) {}, func(c *Context) {}) + + assert.Len(t, group2.Handlers, 4) + assert.Equal(t, "/hola/manu", group2.BasePath()) + assert.Equal(t, router, group2.engine) +} + +func TestRouterGroupBasicHandle(t *testing.T) { + performRequestInGroup(t, "GET") + performRequestInGroup(t, "POST") + performRequestInGroup(t, "PUT") + performRequestInGroup(t, "PATCH") + performRequestInGroup(t, "DELETE") + performRequestInGroup(t, "HEAD") + performRequestInGroup(t, "OPTIONS") +} + +func performRequestInGroup(t *testing.T, method string) { + router := New() + v1 := router.Group("v1", func(c *Context) {}) + assert.Equal(t, "/v1", v1.BasePath()) + + login := v1.Group("/login/", func(c *Context) {}, func(c *Context) {}) + assert.Equal(t, "/v1/login/", login.BasePath()) + + handler := func(c *Context) { + c.String(400, "the method was %s and index %d", c.Request.Method, c.index) + } + + switch method { + case "GET": + v1.GET("/test", handler) + login.GET("/test", handler) + case "POST": + v1.POST("/test", handler) + login.POST("/test", handler) + case "PUT": + v1.PUT("/test", handler) + login.PUT("/test", handler) + case "PATCH": + v1.PATCH("/test", handler) + login.PATCH("/test", handler) + case "DELETE": + v1.DELETE("/test", handler) + login.DELETE("/test", handler) + case "HEAD": + v1.HEAD("/test", handler) + login.HEAD("/test", handler) + case "OPTIONS": + v1.OPTIONS("/test", handler) + login.OPTIONS("/test", handler) + default: + panic("unknown method") + } + + w := performRequest(router, method, "/v1/login/test") + assert.Equal(t, 400, w.Code) + assert.Equal(t, "the method was "+method+" and index 3", w.Body.String()) + + w = performRequest(router, method, "/v1/test") + assert.Equal(t, 400, w.Code) + assert.Equal(t, "the method was "+method+" and index 1", w.Body.String()) +} + +func TestRouterGroupInvalidStatic(t *testing.T) { + router := New() + assert.Panics(t, func() { + router.Static("/path/:param", "/") + }) + + assert.Panics(t, func() { + router.Static("/path/*param", "/") + }) +} + +func TestRouterGroupInvalidStaticFile(t *testing.T) { + router := New() + assert.Panics(t, func() { + router.StaticFile("/path/:param", "favicon.ico") + }) + + assert.Panics(t, func() { + router.StaticFile("/path/*param", "favicon.ico") + }) +} + +func TestRouterGroupTooManyHandlers(t *testing.T) { + router := New() + handlers1 := make([]HandlerFunc, 40) + router.Use(handlers1...) + + handlers2 := make([]HandlerFunc, 26) + assert.Panics(t, func() { + router.Use(handlers2...) + }) + assert.Panics(t, func() { + router.GET("/", handlers2...) 
+ }) +} + +func TestRouterGroupBadMethod(t *testing.T) { + router := New() + assert.Panics(t, func() { + router.Handle("get", "/") + }) + assert.Panics(t, func() { + router.Handle(" GET", "/") + }) + assert.Panics(t, func() { + router.Handle("GET ", "/") + }) + assert.Panics(t, func() { + router.Handle("", "/") + }) + assert.Panics(t, func() { + router.Handle("PO ST", "/") + }) + assert.Panics(t, func() { + router.Handle("1GET", "/") + }) + assert.Panics(t, func() { + router.Handle("PATCh", "/") + }) +} + +func TestRouterGroupPipeline(t *testing.T) { + router := New() + testRoutesInterface(t, router) + + v1 := router.Group("/v1") + testRoutesInterface(t, v1) +} + +func testRoutesInterface(t *testing.T, r IRoutes) { + handler := func(c *Context) {} + assert.Equal(t, r, r.Use(handler)) + + assert.Equal(t, r, r.Handle("GET", "/handler", handler)) + assert.Equal(t, r, r.Any("/any", handler)) + assert.Equal(t, r, r.GET("/", handler)) + assert.Equal(t, r, r.POST("/", handler)) + assert.Equal(t, r, r.DELETE("/", handler)) + assert.Equal(t, r, r.PATCH("/", handler)) + assert.Equal(t, r, r.PUT("/", handler)) + assert.Equal(t, r, r.OPTIONS("/", handler)) + assert.Equal(t, r, r.HEAD("/", handler)) + + assert.Equal(t, r, r.StaticFile("/file", ".")) + assert.Equal(t, r, r.Static("/static", ".")) + assert.Equal(t, r, r.StaticFS("/static2", Dir(".", false))) +} diff --git a/vendor/github.com/gin-gonic/gin/routes_test.go b/vendor/github.com/gin-gonic/gin/routes_test.go new file mode 100644 index 000000000..812939071 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/routes_test.go @@ -0,0 +1,453 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func performRequest(r http.Handler, method, path string) *httptest.ResponseRecorder { + req, _ := http.NewRequest(method, path, nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + return w +} + +func testRouteOK(method string, t *testing.T) { + passed := false + passedAny := false + r := New() + r.Any("/test2", func(c *Context) { + passedAny = true + }) + r.Handle(method, "/test", func(c *Context) { + passed = true + }) + + w := performRequest(r, method, "/test") + assert.True(t, passed) + assert.Equal(t, http.StatusOK, w.Code) + + performRequest(r, method, "/test2") + assert.True(t, passedAny) +} + +// TestSingleRouteOK tests that POST route is correctly invoked. +func testRouteNotOK(method string, t *testing.T) { + passed := false + router := New() + router.Handle(method, "/test_2", func(c *Context) { + passed = true + }) + + w := performRequest(router, method, "/test") + + assert.False(t, passed) + assert.Equal(t, http.StatusNotFound, w.Code) +} + +// TestSingleRouteOK tests that POST route is correctly invoked. 
+func testRouteNotOK2(method string, t *testing.T) { + passed := false + router := New() + router.HandleMethodNotAllowed = true + var methodRoute string + if method == "POST" { + methodRoute = "GET" + } else { + methodRoute = "POST" + } + router.Handle(methodRoute, "/test", func(c *Context) { + passed = true + }) + + w := performRequest(router, method, "/test") + + assert.False(t, passed) + assert.Equal(t, http.StatusMethodNotAllowed, w.Code) +} + +func TestRouterMethod(t *testing.T) { + router := New() + router.PUT("/hey2", func(c *Context) { + c.String(200, "sup2") + }) + + router.PUT("/hey", func(c *Context) { + c.String(200, "called") + }) + + router.PUT("/hey3", func(c *Context) { + c.String(200, "sup3") + }) + + w := performRequest(router, "PUT", "/hey") + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "called", w.Body.String()) +} + +func TestRouterGroupRouteOK(t *testing.T) { + testRouteOK("GET", t) + testRouteOK("POST", t) + testRouteOK("PUT", t) + testRouteOK("PATCH", t) + testRouteOK("HEAD", t) + testRouteOK("OPTIONS", t) + testRouteOK("DELETE", t) + testRouteOK("CONNECT", t) + testRouteOK("TRACE", t) +} + +func TestRouteNotOK(t *testing.T) { + testRouteNotOK("GET", t) + testRouteNotOK("POST", t) + testRouteNotOK("PUT", t) + testRouteNotOK("PATCH", t) + testRouteNotOK("HEAD", t) + testRouteNotOK("OPTIONS", t) + testRouteNotOK("DELETE", t) + testRouteNotOK("CONNECT", t) + testRouteNotOK("TRACE", t) +} + +func TestRouteNotOK2(t *testing.T) { + testRouteNotOK2("GET", t) + testRouteNotOK2("POST", t) + testRouteNotOK2("PUT", t) + testRouteNotOK2("PATCH", t) + testRouteNotOK2("HEAD", t) + testRouteNotOK2("OPTIONS", t) + testRouteNotOK2("DELETE", t) + testRouteNotOK2("CONNECT", t) + testRouteNotOK2("TRACE", t) +} + +func TestRouteRedirectTrailingSlash(t *testing.T) { + router := New() + router.RedirectFixedPath = false + router.RedirectTrailingSlash = true + router.GET("/path", func(c *Context) {}) + router.GET("/path2/", func(c *Context) {}) + router.POST("/path3", func(c *Context) {}) + router.PUT("/path4/", func(c *Context) {}) + + w := performRequest(router, "GET", "/path/") + assert.Equal(t, "/path", w.Header().Get("Location")) + assert.Equal(t, 301, w.Code) + + w = performRequest(router, "GET", "/path2") + assert.Equal(t, "/path2/", w.Header().Get("Location")) + assert.Equal(t, 301, w.Code) + + w = performRequest(router, "POST", "/path3/") + assert.Equal(t, "/path3", w.Header().Get("Location")) + assert.Equal(t, 307, w.Code) + + w = performRequest(router, "PUT", "/path4") + assert.Equal(t, "/path4/", w.Header().Get("Location")) + assert.Equal(t, 307, w.Code) + + w = performRequest(router, "GET", "/path") + assert.Equal(t, 200, w.Code) + + w = performRequest(router, "GET", "/path2/") + assert.Equal(t, 200, w.Code) + + w = performRequest(router, "POST", "/path3") + assert.Equal(t, 200, w.Code) + + w = performRequest(router, "PUT", "/path4/") + assert.Equal(t, 200, w.Code) + + router.RedirectTrailingSlash = false + + w = performRequest(router, "GET", "/path/") + assert.Equal(t, 404, w.Code) + w = performRequest(router, "GET", "/path2") + assert.Equal(t, 404, w.Code) + w = performRequest(router, "POST", "/path3/") + assert.Equal(t, 404, w.Code) + w = performRequest(router, "PUT", "/path4") + assert.Equal(t, 404, w.Code) +} + +func TestRouteRedirectFixedPath(t *testing.T) { + router := New() + router.RedirectFixedPath = true + router.RedirectTrailingSlash = false + + router.GET("/path", func(c *Context) {}) + router.GET("/Path2", func(c *Context) {}) + router.POST("/PATH3", func(c 
*Context) {}) + router.POST("/Path4/", func(c *Context) {}) + + w := performRequest(router, "GET", "/PATH") + assert.Equal(t, "/path", w.Header().Get("Location")) + assert.Equal(t, 301, w.Code) + + w = performRequest(router, "GET", "/path2") + assert.Equal(t, "/Path2", w.Header().Get("Location")) + assert.Equal(t, 301, w.Code) + + w = performRequest(router, "POST", "/path3") + assert.Equal(t, "/PATH3", w.Header().Get("Location")) + assert.Equal(t, 307, w.Code) + + w = performRequest(router, "POST", "/path4") + assert.Equal(t, "/Path4/", w.Header().Get("Location")) + assert.Equal(t, 307, w.Code) +} + +// TestContextParamsGet tests that a parameter can be parsed from the URL. +func TestRouteParamsByName(t *testing.T) { + name := "" + lastName := "" + wild := "" + router := New() + router.GET("/test/:name/:last_name/*wild", func(c *Context) { + name = c.Params.ByName("name") + lastName = c.Params.ByName("last_name") + var ok bool + wild, ok = c.Params.Get("wild") + + assert.True(t, ok) + assert.Equal(t, name, c.Param("name")) + assert.Equal(t, name, c.Param("name")) + assert.Equal(t, lastName, c.Param("last_name")) + + assert.Empty(t, c.Param("wtf")) + assert.Empty(t, c.Params.ByName("wtf")) + + wtf, ok := c.Params.Get("wtf") + assert.Empty(t, wtf) + assert.False(t, ok) + }) + + w := performRequest(router, "GET", "/test/john/smith/is/super/great") + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "john", name) + assert.Equal(t, "smith", lastName) + assert.Equal(t, "/is/super/great", wild) +} + +// TestHandleStaticFile - ensure the static file handles properly +func TestRouteStaticFile(t *testing.T) { + // SETUP file + testRoot, _ := os.Getwd() + f, err := ioutil.TempFile(testRoot, "") + if err != nil { + t.Error(err) + } + defer os.Remove(f.Name()) + f.WriteString("Gin Web Framework") + f.Close() + + dir, filename := filepath.Split(f.Name()) + + // SETUP gin + router := New() + router.Static("/using_static", dir) + router.StaticFile("/result", f.Name()) + + w := performRequest(router, "GET", "/using_static/"+filename) + w2 := performRequest(router, "GET", "/result") + + assert.Equal(t, w, w2) + assert.Equal(t, 200, w.Code) + assert.Equal(t, "Gin Web Framework", w.Body.String()) + assert.Equal(t, "text/plain; charset=utf-8", w.HeaderMap.Get("Content-Type")) + + w3 := performRequest(router, "HEAD", "/using_static/"+filename) + w4 := performRequest(router, "HEAD", "/result") + + assert.Equal(t, w3, w4) + assert.Equal(t, 200, w3.Code) +} + +// TestHandleStaticDir - ensure the root/sub dir handles properly +func TestRouteStaticListingDir(t *testing.T) { + router := New() + router.StaticFS("/", Dir("./", true)) + + w := performRequest(router, "GET", "/") + + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "gin.go") + assert.Equal(t, "text/html; charset=utf-8", w.HeaderMap.Get("Content-Type")) +} + +// TestHandleHeadToDir - ensure the root/sub dir handles properly +func TestRouteStaticNoListing(t *testing.T) { + router := New() + router.Static("/", "./") + + w := performRequest(router, "GET", "/") + + assert.Equal(t, 404, w.Code) + assert.NotContains(t, w.Body.String(), "gin.go") +} + +func TestRouterMiddlewareAndStatic(t *testing.T) { + router := New() + static := router.Group("/", func(c *Context) { + c.Writer.Header().Add("Last-Modified", "Mon, 02 Jan 2006 15:04:05 MST") + c.Writer.Header().Add("Expires", "Mon, 02 Jan 2006 15:04:05 MST") + c.Writer.Header().Add("X-GIN", "Gin Framework") + }) + static.Static("/", "./") + + w := performRequest(router, "GET", "/gin.go") + + 
assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "package gin") + assert.Equal(t, "text/plain; charset=utf-8", w.HeaderMap.Get("Content-Type")) + assert.NotEqual(t, w.HeaderMap.Get("Last-Modified"), "Mon, 02 Jan 2006 15:04:05 MST") + assert.Equal(t, "Mon, 02 Jan 2006 15:04:05 MST", w.HeaderMap.Get("Expires")) + assert.Equal(t, "Gin Framework", w.HeaderMap.Get("x-GIN")) +} + +func TestRouteNotAllowedEnabled(t *testing.T) { + router := New() + router.HandleMethodNotAllowed = true + router.POST("/path", func(c *Context) {}) + w := performRequest(router, "GET", "/path") + assert.Equal(t, http.StatusMethodNotAllowed, w.Code) + + router.NoMethod(func(c *Context) { + c.String(http.StatusTeapot, "responseText") + }) + w = performRequest(router, "GET", "/path") + assert.Equal(t, "responseText", w.Body.String()) + assert.Equal(t, http.StatusTeapot, w.Code) +} + +func TestRouteNotAllowedDisabled(t *testing.T) { + router := New() + router.HandleMethodNotAllowed = false + router.POST("/path", func(c *Context) {}) + w := performRequest(router, "GET", "/path") + assert.Equal(t, 404, w.Code) + + router.NoMethod(func(c *Context) { + c.String(http.StatusTeapot, "responseText") + }) + w = performRequest(router, "GET", "/path") + assert.Equal(t, "404 page not found", w.Body.String()) + assert.Equal(t, 404, w.Code) +} + +func TestRouterNotFound(t *testing.T) { + router := New() + router.RedirectFixedPath = true + router.GET("/path", func(c *Context) {}) + router.GET("/dir/", func(c *Context) {}) + router.GET("/", func(c *Context) {}) + + testRoutes := []struct { + route string + code int + location string + }{ + {"/path/", 301, "/path"}, // TSR -/ + {"/dir", 301, "/dir/"}, // TSR +/ + {"", 301, "/"}, // TSR +/ + {"/PATH", 301, "/path"}, // Fixed Case + {"/DIR/", 301, "/dir/"}, // Fixed Case + {"/PATH/", 301, "/path"}, // Fixed Case -/ + {"/DIR", 301, "/dir/"}, // Fixed Case +/ + {"/../path", 301, "/path"}, // CleanPath + {"/nope", 404, ""}, // NotFound + } + for _, tr := range testRoutes { + w := performRequest(router, "GET", tr.route) + assert.Equal(t, tr.code, w.Code) + if w.Code != 404 { + assert.Equal(t, tr.location, fmt.Sprint(w.Header().Get("Location"))) + } + } + + // Test custom not found handler + var notFound bool + router.NoRoute(func(c *Context) { + c.AbortWithStatus(404) + notFound = true + }) + w := performRequest(router, "GET", "/nope") + assert.Equal(t, 404, w.Code) + assert.True(t, notFound) + + // Test other method than GET (want 307 instead of 301) + router.PATCH("/path", func(c *Context) {}) + w = performRequest(router, "PATCH", "/path/") + assert.Equal(t, 307, w.Code) + assert.Equal(t, "map[Location:[/path]]", fmt.Sprint(w.Header())) + + // Test special case where no node for the prefix "/" exists + router = New() + router.GET("/a", func(c *Context) {}) + w = performRequest(router, "GET", "/") + assert.Equal(t, 404, w.Code) +} + +func TestRouteRawPath(t *testing.T) { + route := New() + route.UseRawPath = true + + route.POST("/project/:name/build/:num", func(c *Context) { + name := c.Params.ByName("name") + num := c.Params.ByName("num") + + assert.Equal(t, name, c.Param("name")) + assert.Equal(t, num, c.Param("num")) + + assert.Equal(t, "Some/Other/Project", name) + assert.Equal(t, "222", num) + }) + + w := performRequest(route, "POST", "/project/Some%2FOther%2FProject/build/222") + assert.Equal(t, 200, w.Code) +} + +func TestRouteRawPathNoUnescape(t *testing.T) { + route := New() + route.UseRawPath = true + route.UnescapePathValues = false + + 
route.POST("/project/:name/build/:num", func(c *Context) { + name := c.Params.ByName("name") + num := c.Params.ByName("num") + + assert.Equal(t, name, c.Param("name")) + assert.Equal(t, num, c.Param("num")) + + assert.Equal(t, "Some%2FOther%2FProject", name) + assert.Equal(t, "333", num) + }) + + w := performRequest(route, "POST", "/project/Some%2FOther%2FProject/build/333") + assert.Equal(t, 200, w.Code) +} + +func TestRouteServeErrorWithWriteHeader(t *testing.T) { + route := New() + route.Use(func(c *Context) { + c.Status(421) + c.Next() + }) + + w := performRequest(route, "GET", "/NotFound") + assert.Equal(t, 421, w.Code) + assert.Equal(t, 0, w.Body.Len()) +} diff --git a/vendor/github.com/gin-gonic/gin/test_helpers.go b/vendor/github.com/gin-gonic/gin/test_helpers.go new file mode 100644 index 000000000..2aed37f20 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/test_helpers.go @@ -0,0 +1,18 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "net/http" +) + +// CreateTestContext returns a fresh engine and context for testing purposes +func CreateTestContext(w http.ResponseWriter) (c *Context, r *Engine) { + r = New() + c = r.allocateContext() + c.reset() + c.writermem.reset(w) + return +} diff --git a/vendor/github.com/gin-gonic/gin/tree.go b/vendor/github.com/gin-gonic/gin/tree.go new file mode 100644 index 000000000..20e3704b3 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/tree.go @@ -0,0 +1,620 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE + +package gin + +import ( + "net/url" + "strings" + "unicode" +) + +// Param is a single URL parameter, consisting of a key and a value. +type Param struct { + Key string + Value string +} + +// Params is a Param-slice, as returned by the router. +// The slice is ordered, the first URL parameter is also the first slice value. +// It is therefore safe to read values by the index. +type Params []Param + +// Get returns the value of the first Param which key matches the given name. +// If no matching Param is found, an empty string is returned. +func (ps Params) Get(name string) (string, bool) { + for _, entry := range ps { + if entry.Key == name { + return entry.Value, true + } + } + return "", false +} + +// ByName returns the value of the first Param which key matches the given name. +// If no matching Param is found, an empty string is returned. 
+func (ps Params) ByName(name string) (va string) { + va, _ = ps.Get(name) + return +} + +type methodTree struct { + method string + root *node +} + +type methodTrees []methodTree + +func (trees methodTrees) get(method string) *node { + for _, tree := range trees { + if tree.method == method { + return tree.root + } + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +func countParams(path string) uint8 { + var n uint + for i := 0; i < len(path); i++ { + if path[i] != ':' && path[i] != '*' { + continue + } + n++ + } + if n >= 255 { + return 255 + } + return uint8(n) +} + +type nodeType uint8 + +const ( + static nodeType = iota // default + root + param + catchAll +) + +type node struct { + path string + indices string + children []*node + handlers HandlersChain + priority uint32 + nType nodeType + maxParams uint8 + wildChild bool +} + +// increments priority of the given child and reorders if necessary. +func (n *node) incrementChildPrio(pos int) int { + n.children[pos].priority++ + prio := n.children[pos].priority + + // adjust position (move to front) + newPos := pos + for newPos > 0 && n.children[newPos-1].priority < prio { + // swap node positions + n.children[newPos-1], n.children[newPos] = n.children[newPos], n.children[newPos-1] + + newPos-- + } + + // build new index char string + if newPos != pos { + n.indices = n.indices[:newPos] + // unchanged prefix, might be empty + n.indices[pos:pos+1] + // the index char we move + n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos' + } + + return newPos +} + +// addRoute adds a node with the given handle to the path. +// Not concurrency-safe! +func (n *node) addRoute(path string, handlers HandlersChain) { + fullPath := path + n.priority++ + numParams := countParams(path) + + // non-empty tree + if len(n.path) > 0 || len(n.children) > 0 { + walk: + for { + // Update maxParams of the current node + if numParams > n.maxParams { + n.maxParams = numParams + } + + // Find the longest common prefix. + // This also implies that the common prefix contains no ':' or '*' + // since the existing key can't contain those chars. + i := 0 + max := min(len(path), len(n.path)) + for i < max && path[i] == n.path[i] { + i++ + } + + // Split edge + if i < len(n.path) { + child := node{ + path: n.path[i:], + wildChild: n.wildChild, + indices: n.indices, + children: n.children, + handlers: n.handlers, + priority: n.priority - 1, + } + + // Update maxParams (max of all children) + for i := range child.children { + if child.children[i].maxParams > child.maxParams { + child.maxParams = child.children[i].maxParams + } + } + + n.children = []*node{&child} + // []byte for proper unicode char conversion, see #65 + n.indices = string([]byte{n.path[i]}) + n.path = path[:i] + n.handlers = nil + n.wildChild = false + } + + // Make new node a child of this node + if i < len(path) { + path = path[i:] + + if n.wildChild { + n = n.children[0] + n.priority++ + + // Update maxParams of the child node + if numParams > n.maxParams { + n.maxParams = numParams + } + numParams-- + + // Check if the wildcard matches + if len(path) >= len(n.path) && n.path == path[:len(n.path)] { + // check for longer wildcard, e.g. 
:name and :names + if len(n.path) >= len(path) || path[len(n.path)] == '/' { + continue walk + } + } + + panic("path segment '" + path + + "' conflicts with existing wildcard '" + n.path + + "' in path '" + fullPath + "'") + } + + c := path[0] + + // slash after param + if n.nType == param && c == '/' && len(n.children) == 1 { + n = n.children[0] + n.priority++ + continue walk + } + + // Check if a child with the next path byte exists + for i := 0; i < len(n.indices); i++ { + if c == n.indices[i] { + i = n.incrementChildPrio(i) + n = n.children[i] + continue walk + } + } + + // Otherwise insert it + if c != ':' && c != '*' { + // []byte for proper unicode char conversion, see #65 + n.indices += string([]byte{c}) + child := &node{ + maxParams: numParams, + } + n.children = append(n.children, child) + n.incrementChildPrio(len(n.indices) - 1) + n = child + } + n.insertChild(numParams, path, fullPath, handlers) + return + + } else if i == len(path) { // Make node a (in-path) leaf + if n.handlers != nil { + panic("handlers are already registered for path ''" + fullPath + "'") + } + n.handlers = handlers + } + return + } + } else { // Empty tree + n.insertChild(numParams, path, fullPath, handlers) + n.nType = root + } +} + +func (n *node) insertChild(numParams uint8, path string, fullPath string, handlers HandlersChain) { + var offset int // already handled bytes of the path + + // find prefix until first wildcard (beginning with ':'' or '*'') + for i, max := 0, len(path); numParams > 0; i++ { + c := path[i] + if c != ':' && c != '*' { + continue + } + + // find wildcard end (either '/' or path end) + end := i + 1 + for end < max && path[end] != '/' { + switch path[end] { + // the wildcard name must not contain ':' and '*' + case ':', '*': + panic("only one wildcard per path segment is allowed, has: '" + + path[i:] + "' in path '" + fullPath + "'") + default: + end++ + } + } + + // check if this Node existing children which would be + // unreachable if we insert the wildcard here + if len(n.children) > 0 { + panic("wildcard route '" + path[i:end] + + "' conflicts with existing children in path '" + fullPath + "'") + } + + // check if the wildcard has a name + if end-i < 2 { + panic("wildcards must be named with a non-empty name in path '" + fullPath + "'") + } + + if c == ':' { // param + // split path at the beginning of the wildcard + if i > 0 { + n.path = path[offset:i] + offset = i + } + + child := &node{ + nType: param, + maxParams: numParams, + } + n.children = []*node{child} + n.wildChild = true + n = child + n.priority++ + numParams-- + + // if the path doesn't end with the wildcard, then there + // will be another non-wildcard subpath starting with '/' + if end < max { + n.path = path[offset:end] + offset = end + + child := &node{ + maxParams: numParams, + priority: 1, + } + n.children = []*node{child} + n = child + } + + } else { // catchAll + if end != max || numParams > 1 { + panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") + } + + if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { + panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") + } + + // currently fixed width 1 for '/' + i-- + if path[i] != '/' { + panic("no / before catch-all in path '" + fullPath + "'") + } + + n.path = path[offset:i] + + // first node: catchAll node with empty path + child := &node{ + wildChild: true, + nType: catchAll, + maxParams: 1, + } + n.children = []*node{child} + n.indices = string(path[i]) + n = child 
+ n.priority++ + + // second node: node holding the variable + child = &node{ + path: path[i:], + nType: catchAll, + maxParams: 1, + handlers: handlers, + priority: 1, + } + n.children = []*node{child} + + return + } + } + + // insert remaining path part and handle to the leaf + n.path = path[offset:] + n.handlers = handlers +} + +// getValue returns the handle registered with the given path (key). The values of +// wildcards are saved to a map. +// If no handle can be found, a TSR (trailing slash redirect) recommendation is +// made if a handle exists with an extra (without the) trailing slash for the +// given path. +func (n *node) getValue(path string, po Params, unescape bool) (handlers HandlersChain, p Params, tsr bool) { + p = po +walk: // Outer loop for walking the tree + for { + if len(path) > len(n.path) { + if path[:len(n.path)] == n.path { + path = path[len(n.path):] + // If this node does not have a wildcard (param or catchAll) + // child, we can just look up the next child node and continue + // to walk down the tree + if !n.wildChild { + c := path[0] + for i := 0; i < len(n.indices); i++ { + if c == n.indices[i] { + n = n.children[i] + continue walk + } + } + + // Nothing found. + // We can recommend to redirect to the same URL without a + // trailing slash if a leaf exists for that path. + tsr = path == "/" && n.handlers != nil + return + } + + // handle wildcard child + n = n.children[0] + switch n.nType { + case param: + // find param end (either '/' or path end) + end := 0 + for end < len(path) && path[end] != '/' { + end++ + } + + // save param value + if cap(p) < int(n.maxParams) { + p = make(Params, 0, n.maxParams) + } + i := len(p) + p = p[:i+1] // expand slice within preallocated capacity + p[i].Key = n.path[1:] + val := path[:end] + if unescape { + var err error + if p[i].Value, err = url.QueryUnescape(val); err != nil { + p[i].Value = val // fallback, in case of error + } + } else { + p[i].Value = val + } + + // we need to go deeper! + if end < len(path) { + if len(n.children) > 0 { + path = path[end:] + n = n.children[0] + continue walk + } + + // ... but we can't + tsr = len(path) == end+1 + return + } + + if handlers = n.handlers; handlers != nil { + return + } + if len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists for TSR recommendation + n = n.children[0] + tsr = n.path == "/" && n.handlers != nil + } + + return + + case catchAll: + // save param value + if cap(p) < int(n.maxParams) { + p = make(Params, 0, n.maxParams) + } + i := len(p) + p = p[:i+1] // expand slice within preallocated capacity + p[i].Key = n.path[2:] + if unescape { + var err error + if p[i].Value, err = url.QueryUnescape(path); err != nil { + p[i].Value = path // fallback, in case of error + } + } else { + p[i].Value = path + } + + handlers = n.handlers + return + + default: + panic("invalid node type") + } + } + } else if path == n.path { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if handlers = n.handlers; handlers != nil { + return + } + + if path == "/" && n.wildChild && n.nType != root { + tsr = true + return + } + + // No handle found. 
Check if a handle for this path + a + // trailing slash exists for trailing slash recommendation + for i := 0; i < len(n.indices); i++ { + if n.indices[i] == '/' { + n = n.children[i] + tsr = (len(n.path) == 1 && n.handlers != nil) || + (n.nType == catchAll && n.children[0].handlers != nil) + return + } + } + + return + } + + // Nothing found. We can recommend to redirect to the same URL with an + // extra trailing slash if a leaf exists for that path + tsr = (path == "/") || + (len(n.path) == len(path)+1 && n.path[len(path)] == '/' && + path == n.path[:len(n.path)-1] && n.handlers != nil) + return + } +} + +// findCaseInsensitivePath makes a case-insensitive lookup of the given path and tries to find a handler. +// It can optionally also fix trailing slashes. +// It returns the case-corrected path and a bool indicating whether the lookup +// was successful. +func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) { + ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory + + // Outer loop for walking the tree + for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) { + path = path[len(n.path):] + ciPath = append(ciPath, n.path...) + + if len(path) > 0 { + // If this node does not have a wildcard (param or catchAll) child, + // we can just look up the next child node and continue to walk down + // the tree + if !n.wildChild { + r := unicode.ToLower(rune(path[0])) + for i, index := range n.indices { + // must use recursive approach since both index and + // ToLower(index) could exist. We must check both. + if r == unicode.ToLower(index) { + out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash) + if found { + return append(ciPath, out...), true + } + } + } + + // Nothing found. We can recommend to redirect to the same URL + // without a trailing slash if a leaf exists for that path + found = fixTrailingSlash && path == "/" && n.handlers != nil + return + } + + n = n.children[0] + switch n.nType { + case param: + // find param end (either '/' or path end) + k := 0 + for k < len(path) && path[k] != '/' { + k++ + } + + // add param value to case insensitive path + ciPath = append(ciPath, path[:k]...) + + // we need to go deeper! + if k < len(path) { + if len(n.children) > 0 { + path = path[k:] + n = n.children[0] + continue + } + + // ... but we can't + if fixTrailingSlash && len(path) == k+1 { + return ciPath, true + } + return + } + + if n.handlers != nil { + return ciPath, true + } else if fixTrailingSlash && len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists + n = n.children[0] + if n.path == "/" && n.handlers != nil { + return append(ciPath, '/'), true + } + } + return + + case catchAll: + return append(ciPath, path...), true + + default: + panic("invalid node type") + } + } else { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if n.handlers != nil { + return ciPath, true + } + + // No handle found. + // Try to fix the path by adding a trailing slash + if fixTrailingSlash { + for i := 0; i < len(n.indices); i++ { + if n.indices[i] == '/' { + n = n.children[i] + if (len(n.path) == 1 && n.handlers != nil) || + (n.nType == catchAll && n.children[0].handlers != nil) { + return append(ciPath, '/'), true + } + return + } + } + } + return + } + } + + // Nothing found. 
+ // Try to fix the path by adding / removing a trailing slash + if fixTrailingSlash { + if path == "/" { + return ciPath, true + } + if len(path)+1 == len(n.path) && n.path[len(path)] == '/' && + strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) && + n.handlers != nil { + return append(ciPath, n.path...), true + } + } + return +} diff --git a/vendor/github.com/gin-gonic/gin/tree_test.go b/vendor/github.com/gin-gonic/gin/tree_test.go new file mode 100644 index 000000000..5bc271712 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/tree_test.go @@ -0,0 +1,666 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE + +package gin + +import ( + "reflect" + "strings" + "testing" +) + +// Used as a workaround since we can't compare functions or their addressses +var fakeHandlerValue string + +func fakeHandler(val string) HandlersChain { + return HandlersChain{func(c *Context) { + fakeHandlerValue = val + }} +} + +type testRequests []struct { + path string + nilHandler bool + route string + ps Params +} + +func checkRequests(t *testing.T, tree *node, requests testRequests, unescapes ...bool) { + unescape := false + if len(unescapes) >= 1 { + unescape = unescapes[0] + } + + for _, request := range requests { + handler, ps, _ := tree.getValue(request.path, nil, unescape) + + if handler == nil { + if !request.nilHandler { + t.Errorf("handle mismatch for route '%s': Expected non-nil handle", request.path) + } + } else if request.nilHandler { + t.Errorf("handle mismatch for route '%s': Expected nil handle", request.path) + } else { + handler[0](nil) + if fakeHandlerValue != request.route { + t.Errorf("handle mismatch for route '%s': Wrong handle (%s != %s)", request.path, fakeHandlerValue, request.route) + } + } + + if !reflect.DeepEqual(ps, request.ps) { + t.Errorf("Params mismatch for route '%s'", request.path) + } + } +} + +func checkPriorities(t *testing.T, n *node) uint32 { + var prio uint32 + for i := range n.children { + prio += checkPriorities(t, n.children[i]) + } + + if n.handlers != nil { + prio++ + } + + if n.priority != prio { + t.Errorf( + "priority mismatch for node '%s': is %d, should be %d", + n.path, n.priority, prio, + ) + } + + return prio +} + +func checkMaxParams(t *testing.T, n *node) uint8 { + var maxParams uint8 + for i := range n.children { + params := checkMaxParams(t, n.children[i]) + if params > maxParams { + maxParams = params + } + } + if n.nType > root && !n.wildChild { + maxParams++ + } + + if n.maxParams != maxParams { + t.Errorf( + "maxParams mismatch for node '%s': is %d, should be %d", + n.path, n.maxParams, maxParams, + ) + } + + return maxParams +} + +func TestCountParams(t *testing.T) { + if countParams("/path/:param1/static/*catch-all") != 2 { + t.Fail() + } + if countParams(strings.Repeat("/:param", 256)) != 255 { + t.Fail() + } +} + +func TestTreeAddAndGet(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/contact", + "/co", + "/c", + "/a", + "/ab", + "/doc/", + "/doc/go_faq.html", + "/doc/go1.html", + "/α", + "/β", + } + for _, route := range routes { + tree.addRoute(route, fakeHandler(route)) + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/a", false, "/a", nil}, + {"/", true, "", nil}, + {"/hi", false, "/hi", nil}, + {"/contact", false, "/contact", nil}, + {"/co", false, "/co", nil}, + {"/con", true, "", nil}, // key mismatch + {"/cona", 
true, "", nil}, // key mismatch + {"/no", true, "", nil}, // no matching child + {"/ab", false, "/ab", nil}, + {"/α", false, "/α", nil}, + {"/β", false, "/β", nil}, + }) + + checkPriorities(t, tree) + checkMaxParams(t, tree) +} + +func TestTreeWildcard(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/", + "/cmd/:tool/:sub", + "/cmd/:tool/", + "/src/*filepath", + "/search/", + "/search/:query", + "/user_:name", + "/user_:name/about", + "/files/:dir/*filepath", + "/doc/", + "/doc/go_faq.html", + "/doc/go1.html", + "/info/:user/public", + "/info/:user/project/:project", + } + for _, route := range routes { + tree.addRoute(route, fakeHandler(route)) + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/", false, "/", nil}, + {"/cmd/test/", false, "/cmd/:tool/", Params{Param{"tool", "test"}}}, + {"/cmd/test", true, "", Params{Param{"tool", "test"}}}, + {"/cmd/test/3", false, "/cmd/:tool/:sub", Params{Param{"tool", "test"}, Param{"sub", "3"}}}, + {"/src/", false, "/src/*filepath", Params{Param{"filepath", "/"}}}, + {"/src/some/file.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file.png"}}}, + {"/search/", false, "/search/", nil}, + {"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, + {"/search/someth!ng+in+ünìcodé/", true, "", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, + {"/user_gopher", false, "/user_:name", Params{Param{"name", "gopher"}}}, + {"/user_gopher/about", false, "/user_:name/about", Params{Param{"name", "gopher"}}}, + {"/files/js/inc/framework.js", false, "/files/:dir/*filepath", Params{Param{"dir", "js"}, Param{"filepath", "/inc/framework.js"}}}, + {"/info/gordon/public", false, "/info/:user/public", Params{Param{"user", "gordon"}}}, + {"/info/gordon/project/go", false, "/info/:user/project/:project", Params{Param{"user", "gordon"}, Param{"project", "go"}}}, + }) + + checkPriorities(t, tree) + checkMaxParams(t, tree) +} + +func TestUnescapeParameters(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/", + "/cmd/:tool/:sub", + "/cmd/:tool/", + "/src/*filepath", + "/search/:query", + "/files/:dir/*filepath", + "/info/:user/project/:project", + "/info/:user", + } + for _, route := range routes { + tree.addRoute(route, fakeHandler(route)) + } + + //printChildren(tree, "") + unescape := true + checkRequests(t, tree, testRequests{ + {"/", false, "/", nil}, + {"/cmd/test/", false, "/cmd/:tool/", Params{Param{"tool", "test"}}}, + {"/cmd/test", true, "", Params{Param{"tool", "test"}}}, + {"/src/some/file.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file.png"}}}, + {"/src/some/file+test.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file test.png"}}}, + {"/src/some/file++++%%%%test.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file++++%%%%test.png"}}}, + {"/src/some/file%2Ftest.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file/test.png"}}}, + {"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{"query", "someth!ng in ünìcodé"}}}, + {"/info/gordon/project/go", false, "/info/:user/project/:project", Params{Param{"user", "gordon"}, Param{"project", "go"}}}, + {"/info/slash%2Fgordon", false, "/info/:user", Params{Param{"user", "slash/gordon"}}}, + {"/info/slash%2Fgordon/project/Project%20%231", false, "/info/:user/project/:project", Params{Param{"user", "slash/gordon"}, Param{"project", "Project #1"}}}, + {"/info/slash%%%%", false, "/info/:user", Params{Param{"user", 
"slash%%%%"}}}, + {"/info/slash%%%%2Fgordon/project/Project%%%%20%231", false, "/info/:user/project/:project", Params{Param{"user", "slash%%%%2Fgordon"}, Param{"project", "Project%%%%20%231"}}}, + }, unescape) + + checkPriorities(t, tree) + checkMaxParams(t, tree) +} + +func catchPanic(testFunc func()) (recv interface{}) { + defer func() { + recv = recover() + }() + + testFunc() + return +} + +type testRoute struct { + path string + conflict bool +} + +func testRoutes(t *testing.T, routes []testRoute) { + tree := &node{} + + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route.path, nil) + }) + + if route.conflict { + if recv == nil { + t.Errorf("no panic for conflicting route '%s'", route.path) + } + } else if recv != nil { + t.Errorf("unexpected panic for route '%s': %v", route.path, recv) + } + } + + //printChildren(tree, "") +} + +func TestTreeWildcardConflict(t *testing.T) { + routes := []testRoute{ + {"/cmd/:tool/:sub", false}, + {"/cmd/vet", true}, + {"/src/*filepath", false}, + {"/src/*filepathx", true}, + {"/src/", true}, + {"/src1/", false}, + {"/src1/*filepath", true}, + {"/src2*filepath", true}, + {"/search/:query", false}, + {"/search/invalid", true}, + {"/user_:name", false}, + {"/user_x", true}, + {"/user_:name", false}, + {"/id:id", false}, + {"/id/:id", true}, + } + testRoutes(t, routes) +} + +func TestTreeChildConflict(t *testing.T) { + routes := []testRoute{ + {"/cmd/vet", false}, + {"/cmd/:tool/:sub", true}, + {"/src/AUTHORS", false}, + {"/src/*filepath", true}, + {"/user_x", false}, + {"/user_:name", true}, + {"/id/:id", false}, + {"/id:id", true}, + {"/:id", true}, + {"/*filepath", true}, + } + testRoutes(t, routes) +} + +func TestTreeDupliatePath(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/", + "/doc/", + "/src/*filepath", + "/search/:query", + "/user_:name", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + + // Add again + recv = catchPanic(func() { + tree.addRoute(route, nil) + }) + if recv == nil { + t.Fatalf("no panic while inserting duplicate route '%s", route) + } + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/", false, "/", nil}, + {"/doc/", false, "/doc/", nil}, + {"/src/some/file.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file.png"}}}, + {"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, + {"/user_gopher", false, "/user_:name", Params{Param{"name", "gopher"}}}, + }) +} + +func TestEmptyWildcardName(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/user:", + "/user:/", + "/cmd/:/", + "/src/*", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, nil) + }) + if recv == nil { + t.Fatalf("no panic while inserting route with empty wildcard name '%s", route) + } + } +} + +func TestTreeCatchAllConflict(t *testing.T) { + routes := []testRoute{ + {"/src/*filepath/x", true}, + {"/src2/", false}, + {"/src2/*filepath/x", true}, + } + testRoutes(t, routes) +} + +func TestTreeCatchAllConflictRoot(t *testing.T) { + routes := []testRoute{ + {"/", false}, + {"/*filepath", true}, + } + testRoutes(t, routes) +} + +func TestTreeDoubleWildcard(t *testing.T) { + const panicMsg = "only one wildcard per path segment is allowed" + + routes := [...]string{ + "/:foo:bar", + "/:foo:bar/", + "/:foo*bar", + } + + for _, route := 
range routes { + tree := &node{} + recv := catchPanic(func() { + tree.addRoute(route, nil) + }) + + if rs, ok := recv.(string); !ok || !strings.HasPrefix(rs, panicMsg) { + t.Fatalf(`"Expected panic "%s" for route '%s', got "%v"`, panicMsg, route, recv) + } + } +} + +/*func TestTreeDuplicateWildcard(t *testing.T) { + tree := &node{} + routes := [...]string{ + "/:id/:name/:id", + } + for _, route := range routes { + ... + } +}*/ + +func TestTreeTrailingSlashRedirect(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/b/", + "/search/:query", + "/cmd/:tool/", + "/src/*filepath", + "/x", + "/x/y", + "/y/", + "/y/z", + "/0/:id", + "/0/:id/1", + "/1/:id/", + "/1/:id/2", + "/aa", + "/a/", + "/admin", + "/admin/:category", + "/admin/:category/:page", + "/doc", + "/doc/go_faq.html", + "/doc/go1.html", + "/no/a", + "/no/b", + "/api/hello/:name", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + } + + //printChildren(tree, "") + + tsrRoutes := [...]string{ + "/hi/", + "/b", + "/search/gopher/", + "/cmd/vet", + "/src", + "/x/", + "/y", + "/0/go/", + "/1/go", + "/a", + "/admin/", + "/admin/config/", + "/admin/config/permissions/", + "/doc/", + } + for _, route := range tsrRoutes { + handler, _, tsr := tree.getValue(route, nil, false) + if handler != nil { + t.Fatalf("non-nil handler for TSR route '%s", route) + } else if !tsr { + t.Errorf("expected TSR recommendation for route '%s'", route) + } + } + + noTsrRoutes := [...]string{ + "/", + "/no", + "/no/", + "/_", + "/_/", + "/api/world/abc", + } + for _, route := range noTsrRoutes { + handler, _, tsr := tree.getValue(route, nil, false) + if handler != nil { + t.Fatalf("non-nil handler for No-TSR route '%s", route) + } else if tsr { + t.Errorf("expected no TSR recommendation for route '%s'", route) + } + } +} + +func TestTreeRootTrailingSlashRedirect(t *testing.T) { + tree := &node{} + + recv := catchPanic(func() { + tree.addRoute("/:test", fakeHandler("/:test")) + }) + if recv != nil { + t.Fatalf("panic inserting test route: %v", recv) + } + + handler, _, tsr := tree.getValue("/", nil, false) + if handler != nil { + t.Fatalf("non-nil handler") + } else if tsr { + t.Errorf("expected no TSR recommendation") + } +} + +func TestTreeFindCaseInsensitivePath(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/b/", + "/ABC/", + "/search/:query", + "/cmd/:tool/", + "/src/*filepath", + "/x", + "/x/y", + "/y/", + "/y/z", + "/0/:id", + "/0/:id/1", + "/1/:id/", + "/1/:id/2", + "/aa", + "/a/", + "/doc", + "/doc/go_faq.html", + "/doc/go1.html", + "/doc/go/away", + "/no/a", + "/no/b", + } + + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + } + + // Check out == in for all registered routes + // With fixTrailingSlash = true + for _, route := range routes { + out, found := tree.findCaseInsensitivePath(route, true) + if !found { + t.Errorf("Route '%s' not found!", route) + } else if string(out) != route { + t.Errorf("Wrong result for route '%s': %s", route, string(out)) + } + } + // With fixTrailingSlash = false + for _, route := range routes { + out, found := tree.findCaseInsensitivePath(route, false) + if !found { + t.Errorf("Route '%s' not found!", route) + } else if string(out) != route { + t.Errorf("Wrong result for route '%s': %s", 
route, string(out)) + } + } + + tests := []struct { + in string + out string + found bool + slash bool + }{ + {"/HI", "/hi", true, false}, + {"/HI/", "/hi", true, true}, + {"/B", "/b/", true, true}, + {"/B/", "/b/", true, false}, + {"/abc", "/ABC/", true, true}, + {"/abc/", "/ABC/", true, false}, + {"/aBc", "/ABC/", true, true}, + {"/aBc/", "/ABC/", true, false}, + {"/abC", "/ABC/", true, true}, + {"/abC/", "/ABC/", true, false}, + {"/SEARCH/QUERY", "/search/QUERY", true, false}, + {"/SEARCH/QUERY/", "/search/QUERY", true, true}, + {"/CMD/TOOL/", "/cmd/TOOL/", true, false}, + {"/CMD/TOOL", "/cmd/TOOL/", true, true}, + {"/SRC/FILE/PATH", "/src/FILE/PATH", true, false}, + {"/x/Y", "/x/y", true, false}, + {"/x/Y/", "/x/y", true, true}, + {"/X/y", "/x/y", true, false}, + {"/X/y/", "/x/y", true, true}, + {"/X/Y", "/x/y", true, false}, + {"/X/Y/", "/x/y", true, true}, + {"/Y/", "/y/", true, false}, + {"/Y", "/y/", true, true}, + {"/Y/z", "/y/z", true, false}, + {"/Y/z/", "/y/z", true, true}, + {"/Y/Z", "/y/z", true, false}, + {"/Y/Z/", "/y/z", true, true}, + {"/y/Z", "/y/z", true, false}, + {"/y/Z/", "/y/z", true, true}, + {"/Aa", "/aa", true, false}, + {"/Aa/", "/aa", true, true}, + {"/AA", "/aa", true, false}, + {"/AA/", "/aa", true, true}, + {"/aA", "/aa", true, false}, + {"/aA/", "/aa", true, true}, + {"/A/", "/a/", true, false}, + {"/A", "/a/", true, true}, + {"/DOC", "/doc", true, false}, + {"/DOC/", "/doc", true, true}, + {"/NO", "", false, true}, + {"/DOC/GO", "", false, true}, + } + // With fixTrailingSlash = true + for _, test := range tests { + out, found := tree.findCaseInsensitivePath(test.in, true) + if found != test.found || (found && (string(out) != test.out)) { + t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t", + test.in, string(out), found, test.out, test.found) + return + } + } + // With fixTrailingSlash = false + for _, test := range tests { + out, found := tree.findCaseInsensitivePath(test.in, false) + if test.slash { + if found { // test needs a trailingSlash fix. It must not be found! + t.Errorf("Found without fixTrailingSlash: %s; got %s", test.in, string(out)) + } + } else { + if found != test.found || (found && (string(out) != test.out)) { + t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t", + test.in, string(out), found, test.out, test.found) + return + } + } + } +} + +func TestTreeInvalidNodeType(t *testing.T) { + const panicMsg = "invalid node type" + + tree := &node{} + tree.addRoute("/", fakeHandler("/")) + tree.addRoute("/:page", fakeHandler("/:page")) + + // set invalid node type + tree.children[0].nType = 42 + + // normal lookup + recv := catchPanic(func() { + tree.getValue("/test", nil, false) + }) + if rs, ok := recv.(string); !ok || rs != panicMsg { + t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv) + } + + // case-insensitive lookup + recv = catchPanic(func() { + tree.findCaseInsensitivePath("/test", true) + }) + if rs, ok := recv.(string); !ok || rs != panicMsg { + t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv) + } +} diff --git a/vendor/github.com/gin-gonic/gin/utils.go b/vendor/github.com/gin-gonic/gin/utils.go new file mode 100644 index 000000000..99d19af4e --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/utils.go @@ -0,0 +1,154 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package gin + +import ( + "encoding/xml" + "net/http" + "os" + "path" + "reflect" + "runtime" + "strings" +) + +const BindKey = "_gin-gonic/gin/bindkey" + +func Bind(val interface{}) HandlerFunc { + value := reflect.ValueOf(val) + if value.Kind() == reflect.Ptr { + panic(`Bind struct can not be a pointer. Example: + Use: gin.Bind(Struct{}) instead of gin.Bind(&Struct{}) +`) + } + typ := value.Type() + + return func(c *Context) { + obj := reflect.New(typ).Interface() + if c.Bind(obj) == nil { + c.Set(BindKey, obj) + } + } +} + +// WrapF is a helper function for wrapping http.HandlerFunc +// Returns a Gin middleware +func WrapF(f http.HandlerFunc) HandlerFunc { + return func(c *Context) { + f(c.Writer, c.Request) + } +} + +// WrapH is a helper function for wrapping http.Handler +// Returns a Gin middleware +func WrapH(h http.Handler) HandlerFunc { + return func(c *Context) { + h.ServeHTTP(c.Writer, c.Request) + } +} + +// H is a shortcup for map[string]interface{} +type H map[string]interface{} + +// MarshalXML allows type H to be used with xml.Marshal. +func (h H) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name = xml.Name{ + Space: "", + Local: "map", + } + if err := e.EncodeToken(start); err != nil { + return err + } + for key, value := range h { + elem := xml.StartElement{ + Name: xml.Name{Space: "", Local: key}, + Attr: []xml.Attr{}, + } + if err := e.EncodeElement(value, elem); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +func assert1(guard bool, text string) { + if !guard { + panic(text) + } +} + +func filterFlags(content string) string { + for i, char := range content { + if char == ' ' || char == ';' { + return content[:i] + } + } + return content +} + +func chooseData(custom, wildcard interface{}) interface{} { + if custom == nil { + if wildcard == nil { + panic("negotiation config is invalid") + } + return wildcard + } + return custom +} + +func parseAccept(acceptHeader string) []string { + parts := strings.Split(acceptHeader, ",") + out := make([]string, 0, len(parts)) + for _, part := range parts { + if index := strings.IndexByte(part, ';'); index >= 0 { + part = part[0:index] + } + if part = strings.TrimSpace(part); part != "" { + out = append(out, part) + } + } + return out +} + +func lastChar(str string) uint8 { + if str == "" { + panic("The length of the string can't be 0") + } + return str[len(str)-1] +} + +func nameOfFunction(f interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name() +} + +func joinPaths(absolutePath, relativePath string) string { + if relativePath == "" { + return absolutePath + } + + finalPath := path.Join(absolutePath, relativePath) + appendSlash := lastChar(relativePath) == '/' && lastChar(finalPath) != '/' + if appendSlash { + return finalPath + "/" + } + return finalPath +} + +func resolveAddress(addr []string) string { + switch len(addr) { + case 0: + if port := os.Getenv("PORT"); port != "" { + debugPrint("Environment variable PORT=\"%s\"", port) + return ":" + port + } + debugPrint("Environment variable PORT is undefined. Using port :8080 by default") + return ":8080" + case 1: + return addr[0] + default: + panic("too much parameters") + } +} diff --git a/vendor/github.com/gin-gonic/gin/utils_test.go b/vendor/github.com/gin-gonic/gin/utils_test.go new file mode 100644 index 000000000..3d019e7e2 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/utils_test.go @@ -0,0 +1,126 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. 
+// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" +) + +func init() { + SetMode(TestMode) +} + +type testStruct struct { + T *testing.T +} + +func (t *testStruct) ServeHTTP(w http.ResponseWriter, req *http.Request) { + assert.Equal(t.T, "POST", req.Method) + assert.Equal(t.T, "/path", req.URL.Path) + w.WriteHeader(500) + fmt.Fprint(w, "hello") +} + +func TestWrap(t *testing.T) { + router := New() + router.POST("/path", WrapH(&testStruct{t})) + router.GET("/path2", WrapF(func(w http.ResponseWriter, req *http.Request) { + assert.Equal(t, "GET", req.Method) + assert.Equal(t, "/path2", req.URL.Path) + w.WriteHeader(400) + fmt.Fprint(w, "hola!") + })) + + w := performRequest(router, "POST", "/path") + assert.Equal(t, 500, w.Code) + assert.Equal(t, "hello", w.Body.String()) + + w = performRequest(router, "GET", "/path2") + assert.Equal(t, 400, w.Code) + assert.Equal(t, "hola!", w.Body.String()) +} + +func TestLastChar(t *testing.T) { + assert.Equal(t, uint8('a'), lastChar("hola")) + assert.Equal(t, uint8('s'), lastChar("adios")) + assert.Panics(t, func() { lastChar("") }) +} + +func TestParseAccept(t *testing.T) { + parts := parseAccept("text/html , application/xhtml+xml,application/xml;q=0.9, */* ;q=0.8") + assert.Len(t, parts, 4) + assert.Equal(t, "text/html", parts[0]) + assert.Equal(t, "application/xhtml+xml", parts[1]) + assert.Equal(t, "application/xml", parts[2]) + assert.Equal(t, "*/*", parts[3]) +} + +func TestChooseData(t *testing.T) { + A := "a" + B := "b" + assert.Equal(t, A, chooseData(A, B)) + assert.Equal(t, B, chooseData(nil, B)) + assert.Panics(t, func() { chooseData(nil, nil) }) +} + +func TestFilterFlags(t *testing.T) { + result := filterFlags("text/html ") + assert.Equal(t, "text/html", result) + + result = filterFlags("text/html;") + assert.Equal(t, "text/html", result) +} + +func TestFunctionName(t *testing.T) { + assert.Regexp(t, `^(.*/vendor/)?github.com/gin-gonic/gin.somefunction$`, nameOfFunction(somefunction)) +} + +func somefunction() { + // this empty function is used by TestFunctionName() +} + +func TestJoinPaths(t *testing.T) { + assert.Equal(t, "", joinPaths("", "")) + assert.Equal(t, "/", joinPaths("", "/")) + assert.Equal(t, "/a", joinPaths("/a", "")) + assert.Equal(t, "/a/", joinPaths("/a/", "")) + assert.Equal(t, "/a/", joinPaths("/a/", "/")) + assert.Equal(t, "/a/", joinPaths("/a", "/")) + assert.Equal(t, "/a/hola", joinPaths("/a", "/hola")) + assert.Equal(t, "/a/hola", joinPaths("/a/", "/hola")) + assert.Equal(t, "/a/hola/", joinPaths("/a/", "/hola/")) + assert.Equal(t, "/a/hola/", joinPaths("/a/", "/hola//")) +} + +type bindTestStruct struct { + Foo string `form:"foo" binding:"required"` + Bar int `form:"bar" binding:"min=4"` +} + +func TestBindMiddleware(t *testing.T) { + var value *bindTestStruct + var called bool + router := New() + router.GET("/", Bind(bindTestStruct{}), func(c *Context) { + called = true + value = c.MustGet(BindKey).(*bindTestStruct) + }) + performRequest(router, "GET", "/?foo=hola&bar=10") + assert.True(t, called) + assert.Equal(t, "hola", value.Foo) + assert.Equal(t, 10, value.Bar) + + called = false + performRequest(router, "GET", "/?foo=hola&bar=1") + assert.False(t, called) + + assert.Panics(t, func() { + Bind(&bindTestStruct{}) + }) +} diff --git a/vendor/github.com/gin-gonic/gin/vendor/vendor.json b/vendor/github.com/gin-gonic/gin/vendor/vendor.json new file mode 100644 
index 000000000..573abe24e --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/vendor/vendor.json @@ -0,0 +1,127 @@ +{ + "comment": "v1.2", + "ignore": "test", + "package": [ + { + "checksumSHA1": "dvabztWVQX8f6oMLRyv4dLH+TGY=", + "comment": "v1.1.0", + "path": "github.com/davecgh/go-spew/spew", + "revision": "346938d642f2ec3594ed81d874461961cd0faa76", + "revisionTime": "2016-10-29T20:57:26Z" + }, + { + "checksumSHA1": "7c3FuEadBInl/4ExSrB7iJMXpe4=", + "path": "github.com/dustin/go-broadcast", + "revision": "3bdf6d4a7164a50bc19d5f230e2981d87d2584f1", + "revisionTime": "2014-06-27T04:00:55Z" + }, + { + "checksumSHA1": "QeKwBtN2df+j+4stw3bQJ6yO4EY=", + "path": "github.com/gin-contrib/sse", + "revision": "22d885f9ecc78bf4ee5d72b937e4bbcdc58e8cae", + "revisionTime": "2017-01-09T09:34:21Z" + }, + { + "checksumSHA1": "+vZNyF2MykVjenLg1TpjjgjthV0=", + "path": "github.com/gin-gonic/autotls", + "revision": "8ca25fbde72bb72a00466215b94b489c71fcb815", + "revisionTime": "2017-09-16T16:54:15Z" + }, + { + "checksumSHA1": "qlPUeFabwF4RKAOF1H+yBFU1Veg=", + "path": "github.com/golang/protobuf/proto", + "revision": "5a0f697c9ed9d68fef0116532c6e05cfeae00e55", + "revisionTime": "2017-06-01T23:02:30Z" + }, + { + "checksumSHA1": "Ajh8TemnItg4nn+jKmVcsMRALBc=", + "path": "github.com/json-iterator/go", + "revision": "36b14963da70d11297d313183d7e6388c8510e1e", + "revisionTime": "2017-08-29T15:58:51Z" + }, + { + "checksumSHA1": "9if9IBLsxkarJ804NPWAzgskIAk=", + "path": "github.com/manucorporat/stats", + "revision": "8f2d6ace262eba462e9beb552382c98be51d807b", + "revisionTime": "2015-05-31T20:46:25Z" + }, + { + "checksumSHA1": "U6lX43KDDlNOn+Z0Yyww+ZzHfFo=", + "path": "github.com/mattn/go-isatty", + "revision": "57fdcb988a5c543893cc61bce354a6e24ab70022", + "revisionTime": "2017-03-07T16:30:44Z" + }, + { + "checksumSHA1": "LuFv4/jlrmFNnDb/5SCSEPAM9vU=", + "comment": "v1.0.0", + "path": "github.com/pmezard/go-difflib/difflib", + "revision": "792786c7400a136282c1664665ae0a8db921c6c2", + "revisionTime": "2016-01-10T10:55:54Z" + }, + { + "checksumSHA1": "Q2V7Zs3diLmLfmfbiuLpSxETSuY=", + "comment": "v1.1.4", + "path": "github.com/stretchr/testify/assert", + "revision": "976c720a22c8eb4eb6a0b4348ad85ad12491a506", + "revisionTime": "2016-09-25T22:06:09Z" + }, + { + "checksumSHA1": "IopMW+arBezL5bqOfrVU6UEfn28=", + "path": "github.com/thinkerou/favicon", + "revision": "94a442a49da6e2d44bdd5e0d2e2e185c43a19d93", + "revisionTime": "2017-07-10T14:05:20Z" + }, + { + "checksumSHA1": "CoxdaTYdPZNJXr8mJfLxye428N0=", + "path": "github.com/ugorji/go/codec", + "revision": "c88ee250d0221a57af388746f5cf03768c21d6e2", + "revisionTime": "2017-02-15T20:11:44Z" + }, + { + "checksumSHA1": "W0j4I7QpxXlChjyhAojZmFjU6Bg=", + "path": "golang.org/x/crypto/acme", + "revision": "adbae1b6b6fb4b02448a0fc0dbbc9ba2b95b294d", + "revisionTime": "2017-06-19T06:03:41Z" + }, + { + "checksumSHA1": "TrKJW+flz7JulXU3sqnBJjGzgQc=", + "path": "golang.org/x/crypto/acme/autocert", + "revision": "adbae1b6b6fb4b02448a0fc0dbbc9ba2b95b294d", + "revisionTime": "2017-06-19T06:03:41Z" + }, + { + "checksumSHA1": "9jjO5GjLa0XF/nfWihF02RoH4qc=", + "comment": "release-branch.go1.7", + "path": "golang.org/x/net/context", + "revision": "d4c55e66d8c3a2f3382d264b08e3e3454a66355a", + "revisionTime": "2016-10-18T08:54:36Z" + }, + { + "checksumSHA1": "S0DP7Pn7sZUmXc55IzZnNvERu6s=", + "path": "golang.org/x/sync/errgroup", + "revision": "8e0aa688b654ef28caa72506fa5ec8dba9fc7690", + "revisionTime": "2017-07-19T03:38:01Z" + }, + { + "checksumSHA1": "TVEkpH3gq84iQ39I4R+mlDwjuVI=", + 
"path": "golang.org/x/sys/unix", + "revision": "99f16d856c9836c42d24e7ab64ea72916925fa97", + "revisionTime": "2017-03-08T15:04:45Z" + }, + { + "checksumSHA1": "39V1idWER42Lmcmg2Uy40wMzOlo=", + "comment": "v8.18.1", + "path": "gopkg.in/go-playground/validator.v8", + "revision": "5f57d2222ad794d0dffb07e664ea05e2ee07d60c", + "revisionTime": "2016-07-18T13:41:25Z" + }, + { + "checksumSHA1": "12GqsW8PiRPnezDDy0v4brZrndM=", + "comment": "v2", + "path": "gopkg.in/yaml.v2", + "revision": "a5b47d31c556af34a302ce5d659e6fea44d90de0", + "revisionTime": "2016-09-28T15:37:09Z" + } + ], + "rootPath": "github.com/gin-gonic/gin" +} diff --git a/vendor/github.com/gin-gonic/gin/wercker.yml b/vendor/github.com/gin-gonic/gin/wercker.yml new file mode 100644 index 000000000..3ab8084cc --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/wercker.yml @@ -0,0 +1 @@ +box: wercker/default \ No newline at end of file diff --git a/vendor/github.com/go-kit/kit/.travis.yml b/vendor/github.com/go-kit/kit/.travis.yml new file mode 100644 index 000000000..f77ab8d44 --- /dev/null +++ b/vendor/github.com/go-kit/kit/.travis.yml @@ -0,0 +1,17 @@ +language: go + +env: + - COVERALLS_TOKEN=MYSkSqcsWXd6DmP6TnSeiDhtvuL4u6ndp + +before_install: + - go get github.com/mattn/goveralls + - go get github.com/modocache/gover + +script: + - go test -race -v ./... + - ./coveralls.bash + +go: + - 1.9.x + - 1.10.x + - tip diff --git a/vendor/github.com/go-kit/kit/CONTRIBUTING.md b/vendor/github.com/go-kit/kit/CONTRIBUTING.md new file mode 100644 index 000000000..c0751f85a --- /dev/null +++ b/vendor/github.com/go-kit/kit/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +First, thank you for contributing! We love and encourage pull requests from everyone. + +Before submitting major changes, here are a few guidelines to follow: + +1. Check the [open issues][issues] and [pull requests][prs] for existing discussions. +1. Open an [issue][issues] first, to discuss a new feature or enhancement. +1. Write tests, and make sure the test suite passes locally and on CI. +1. Open a pull request, and reference the relevant issue(s). +1. After receiving feedback, [squash your commits][squash] and add a [great commit message][message]. +1. Have fun! + +[issues]: https://github.com/go-kit/kit/issues +[prs]: https://github.com/go-kit/kit/pulls +[squash]: http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html +[message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html + diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE new file mode 100644 index 000000000..9d83342ac --- /dev/null +++ b/vendor/github.com/go-kit/kit/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/go-kit/kit/README.md b/vendor/github.com/go-kit/kit/README.md new file mode 100644 index 000000000..6b8aaf445 --- /dev/null +++ b/vendor/github.com/go-kit/kit/README.md @@ -0,0 +1,119 @@ +# Go kit [![Circle CI](https://circleci.com/gh/go-kit/kit.svg?style=shield)](https://circleci.com/gh/go-kit/kit) [![Travis CI](https://travis-ci.org/go-kit/kit.svg?branch=master)](https://travis-ci.org/go-kit/kit) [![GoDoc](https://godoc.org/github.com/go-kit/kit?status.svg)](https://godoc.org/github.com/go-kit/kit) [![Coverage Status](https://coveralls.io/repos/go-kit/kit/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-kit/kit?branch=master) [![Go Report Card](https://goreportcard.com/badge/go-kit/kit)](https://goreportcard.com/report/go-kit/kit) [![Sourcegraph](https://sourcegraph.com/github.com/go-kit/kit/-/badge.svg)](https://sourcegraph.com/github.com/go-kit/kit?badge) + +**Go kit** is a **programming toolkit** for building microservices +(or elegant monoliths) in Go. We solve common problems in distributed +systems and application architecture so you can focus on delivering +business value. + +- Website: [gokit.io](https://gokit.io) +- Mailing list: [go-kit](https://groups.google.com/forum/#!forum/go-kit) +- Slack: [gophers.slack.com](https://gophers.slack.com) **#go-kit** ([invite](https://gophersinvite.herokuapp.com/)) + +## Motivation + +Go has emerged as the language of the server, but it remains underrepresented +in so-called "modern enterprise" companies like Facebook, Twitter, Netflix, and +SoundCloud. Many of these organizations have turned to JVM-based stacks for +their business logic, owing in large part to libraries and ecosystems that +directly support their microservice architectures. + +To reach its next level of success, Go needs more than simple primitives and +idioms. It needs a comprehensive toolkit, for coherent distributed programming +in the large. Go kit is a set of packages and best practices, which provide a +comprehensive, robust, and trustable way of building microservices for +organizations of any size. + +For more details, see + [the website](https://gokit.io), + [the motivating blog post](http://peter.bourgon.org/go-kit/) and + [the video of the talk](https://www.youtube.com/watch?v=iFR_7AKkJFU). +See also the + [Go kit talk at GopherCon 2015](https://www.youtube.com/watch?v=1AjaZi4QuGo). + +## Goals + +- Operate in a heterogeneous SOA — expect to interact with mostly non-Go-kit services +- RPC as the primary messaging pattern +- Pluggable serialization and transport — not just JSON over HTTP +- Operate within existing infrastructures — no mandates for specific tools or technologies + +## Non-goals + +- Supporting messaging patterns other than RPC (for now) — e.g. MPI, pub/sub, CQRS, etc. +- Re-implementing functionality that can be provided by adapting existing software +- Having opinions on operational concerns: deployment, configuration, process supervision, orchestration, etc. + +## Contributing + +Please see [CONTRIBUTING.md](/CONTRIBUTING.md). +Thank you, [contributors](https://github.com/go-kit/kit/graphs/contributors)! + +## Dependency management + +Go kit is a library, designed to be imported into a binary package. 
Vendoring +is currently the best way for binary package authors to ensure reliable, +reproducible builds. Therefore, we strongly recommend our users use vendoring +for all of their dependencies, including Go kit. To avoid compatibility and +availability issues, Go kit doesn't vendor its own dependencies, and +doesn't recommend use of third-party import proxies. + +There are several tools which make vendoring easier, including + [dep](https://github.com/golang/dep), + [gb](http://getgb.io), + [glide](https://github.com/Masterminds/glide), + [gvt](https://github.com/FiloSottile/gvt), and + [govendor](https://github.com/kardianos/govendor). +In addition, Go kit uses a variety of continuous integration providers + to find and fix compatibility problems as soon as they occur. + +## Related projects + +Projects with a ★ have had particular influence on Go kit's design (or vice-versa). + +### Service frameworks + +- [gizmo](https://github.com/nytimes/gizmo), a microservice toolkit from The New York Times ★ +- [go-micro](https://github.com/myodc/go-micro), a microservices client/server library ★ +- [gotalk](https://github.com/rsms/gotalk), async peer communication protocol & library +- [Kite](https://github.com/koding/kite), a micro-service framework +- [gocircuit](https://github.com/gocircuit/circuit), dynamic cloud orchestration + +### Individual components + +- [afex/hystrix-go](https://github.com/afex/hystrix-go), client-side latency and fault tolerance library +- [armon/go-metrics](https://github.com/armon/go-metrics), library for exporting performance and runtime metrics to external metrics systems +- [codahale/lunk](https://github.com/codahale/lunk), structured logging in the style of Google's Dapper or Twitter's Zipkin +- [eapache/go-resiliency](https://github.com/eapache/go-resiliency), resiliency patterns +- [sasbury/logging](https://github.com/sasbury/logging), a tagged style of logging +- [grpc/grpc-go](https://github.com/grpc/grpc-go), HTTP/2 based RPC +- [inconshreveable/log15](https://github.com/inconshreveable/log15), simple, powerful logging for Go ★ +- [mailgun/vulcand](https://github.com/vulcand/vulcand), programmatic load balancer backed by etcd +- [mattheath/phosphor](https://github.com/mondough/phosphor), distributed system tracing +- [pivotal-golang/lager](https://github.com/pivotal-golang/lager), an opinionated logging library +- [rubyist/circuitbreaker](https://github.com/rubyist/circuitbreaker), circuit breaker library +- [sirupsen/logrus](https://github.com/sirupsen/logrus), structured, pluggable logging for Go ★ +- [sourcegraph/appdash](https://github.com/sourcegraph/appdash), application tracing system based on Google's Dapper +- [spacemonkeygo/monitor](https://github.com/spacemonkeygo/monitor), data collection, monitoring, instrumentation, and Zipkin client library +- [streadway/handy](https://github.com/streadway/handy), net/http handler filters +- [vitess/rpcplus](https://godoc.org/github.com/youtube/vitess/go/rpcplus), package rpc + context.Context +- [gdamore/mangos](https://github.com/gdamore/mangos), nanomsg implementation in pure Go + +### Web frameworks + +- [Gorilla](http://www.gorillatoolkit.org) +- [Gin](https://gin-gonic.github.io/gin/) +- [Negroni](https://github.com/codegangsta/negroni) +- [Goji](https://github.com/zenazn/goji) +- [Martini](https://github.com/go-martini/martini) +- [Beego](http://beego.me/) +- [Revel](https://revel.github.io/) (considered [harmful](https://github.com/go-kit/kit/issues/350)) + +## Additional reading + +- [Architecting for 
the Cloud](http://fr.slideshare.net/stonse/architecting-for-the-cloud-using-netflixoss-codemash-workshop-29852233) — Netflix +- [Dapper, a Large-Scale Distributed Systems Tracing Infrastructure](http://research.google.com/pubs/pub36356.html) — Google +- [Your Server as a Function](http://monkey.org/~marius/funsrv.pdf) (PDF) — Twitter + +--- + +Development supported by [DigitalOcean](https://digitalocean.com). diff --git a/vendor/github.com/go-kit/kit/ROADMAP.md b/vendor/github.com/go-kit/kit/ROADMAP.md new file mode 100644 index 000000000..5c462aa21 --- /dev/null +++ b/vendor/github.com/go-kit/kit/ROADMAP.md @@ -0,0 +1,17 @@ +# Roadmap + +This is a coarse-grained roadmap of Go kit development direction in the short +to mid-term future. It will be kept reasonably up-to-date by the project +maintainers. Suggest new ideas, enhancements, and features using the standard +[issues](https://github.com/go-kit/kit/issues) model. + +## Prioritized + +1. kitgen code generation (#308, #70) +1. package pubsub (#298, #295) + +## Unprioritized + +- package log/levels refactor (#250, #269, #252) +- package auth/jwt (#255) + diff --git a/vendor/github.com/go-kit/kit/auth/basic/README.md b/vendor/github.com/go-kit/kit/auth/basic/README.md new file mode 100644 index 000000000..26d6c4b31 --- /dev/null +++ b/vendor/github.com/go-kit/kit/auth/basic/README.md @@ -0,0 +1,20 @@ +This package provides a Basic Authentication middleware. + +It'll try to compare credentials from Authentication request header to a username/password pair in middleware constructor. + +More details about this type of authentication can be found in [Mozilla article](https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication). + +## Usage + +```go +import httptransport "github.com/go-kit/kit/transport/http" + +httptransport.NewServer( + AuthMiddleware(cfg.auth.user, cfg.auth.password, "Example Realm")(makeUppercaseEndpoint()), + decodeMappingsRequest, + httptransport.EncodeJSONResponse, + httptransport.ServerBefore(httptransport.PopulateRequestContext), + ) +``` + +For AuthMiddleware to be able to pick up the Authentication header from an HTTP request we need to pass it through the context with something like ```httptransport.ServerBefore(httptransport.PopulateRequestContext)```. \ No newline at end of file diff --git a/vendor/github.com/go-kit/kit/auth/basic/middleware.go b/vendor/github.com/go-kit/kit/auth/basic/middleware.go new file mode 100644 index 000000000..ad7e4085d --- /dev/null +++ b/vendor/github.com/go-kit/kit/auth/basic/middleware.go @@ -0,0 +1,94 @@ +package basic + +import ( + "bytes" + "context" + "crypto/sha256" + "crypto/subtle" + "encoding/base64" + "fmt" + "net/http" + "strings" + + "github.com/go-kit/kit/endpoint" + httptransport "github.com/go-kit/kit/transport/http" +) + +// AuthError represents an authorization error. +type AuthError struct { + Realm string +} + +// StatusCode is an implementation of the StatusCoder interface in go-kit/http. +func (AuthError) StatusCode() int { + return http.StatusUnauthorized +} + +// Error is an implementation of the Error interface. +func (AuthError) Error() string { + return http.StatusText(http.StatusUnauthorized) +} + +// Headers is an implementation of the Headerer interface in go-kit/http. 
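+//
+// For example, with Realm set to "Example Realm" (as in the package README),
+// the error response carries a challenge of the form
+//
+//	WWW-Authenticate: Basic realm="Example Realm"
+//
+// alongside a text/plain Content-Type and X-Content-Type-Options: nosniff.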
+func (e AuthError) Headers() http.Header { + return http.Header{ + "Content-Type": []string{"text/plain; charset=utf-8"}, + "X-Content-Type-Options": []string{"nosniff"}, + "WWW-Authenticate": []string{fmt.Sprintf(`Basic realm=%q`, e.Realm)}, + } +} + +// parseBasicAuth parses an HTTP Basic Authentication string. +// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ([]byte("Aladdin"), []byte("open sesame"), true). +func parseBasicAuth(auth string) (username, password []byte, ok bool) { + const prefix = "Basic " + if !strings.HasPrefix(auth, prefix) { + return + } + c, err := base64.StdEncoding.DecodeString(auth[len(prefix):]) + if err != nil { + return + } + + s := bytes.IndexByte(c, ':') + if s < 0 { + return + } + return c[:s], c[s+1:], true +} + +// Returns a hash of a given slice. +func toHashSlice(s []byte) []byte { + hash := sha256.Sum256(s) + return hash[:] +} + +// AuthMiddleware returns a Basic Authentication middleware for a particular user and password. +func AuthMiddleware(requiredUser, requiredPassword, realm string) endpoint.Middleware { + requiredUserBytes := toHashSlice([]byte(requiredUser)) + requiredPasswordBytes := toHashSlice([]byte(requiredPassword)) + + return func(next endpoint.Endpoint) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + auth, ok := ctx.Value(httptransport.ContextKeyRequestAuthorization).(string) + if !ok { + return nil, AuthError{realm} + } + + givenUser, givenPassword, ok := parseBasicAuth(auth) + if !ok { + return nil, AuthError{realm} + } + + givenUserBytes := toHashSlice(givenUser) + givenPasswordBytes := toHashSlice(givenPassword) + + if subtle.ConstantTimeCompare(givenUserBytes, requiredUserBytes) == 0 || + subtle.ConstantTimeCompare(givenPasswordBytes, requiredPasswordBytes) == 0 { + return nil, AuthError{realm} + } + + return next(ctx, request) + } + } +} diff --git a/vendor/github.com/go-kit/kit/auth/basic/middleware_test.go b/vendor/github.com/go-kit/kit/auth/basic/middleware_test.go new file mode 100644 index 000000000..9ad330ebb --- /dev/null +++ b/vendor/github.com/go-kit/kit/auth/basic/middleware_test.go @@ -0,0 +1,52 @@ +package basic + +import ( + "context" + "encoding/base64" + "fmt" + "testing" + + httptransport "github.com/go-kit/kit/transport/http" +) + +func TestWithBasicAuth(t *testing.T) { + requiredUser := "test-user" + requiredPassword := "test-pass" + realm := "test realm" + + type want struct { + result interface{} + err error + } + tests := []struct { + name string + authHeader interface{} + want want + }{ + {"Isn't valid with nil header", nil, want{nil, AuthError{realm}}}, + {"Isn't valid with non-string header", 42, want{nil, AuthError{realm}}}, + {"Isn't valid without authHeader", "", want{nil, AuthError{realm}}}, + {"Isn't valid for wrong user", makeAuthString("wrong-user", requiredPassword), want{nil, AuthError{realm}}}, + {"Isn't valid for wrong password", makeAuthString(requiredUser, "wrong-password"), want{nil, AuthError{realm}}}, + {"Is valid for correct creds", makeAuthString(requiredUser, requiredPassword), want{true, nil}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.WithValue(context.TODO(), httptransport.ContextKeyRequestAuthorization, tt.authHeader) + + result, err := AuthMiddleware(requiredUser, requiredPassword, realm)(passedValidation)(ctx, nil) + if result != tt.want.result || err != tt.want.err { + t.Errorf("WithBasicAuth() = result: %v, err: %v, want result: %v, want error: %v", result, err, tt.want.result, 
tt.want.err) + } + }) + } +} + +func makeAuthString(user string, password string) string { + data := []byte(fmt.Sprintf("%s:%s", user, password)) + return fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString(data)) +} + +func passedValidation(ctx context.Context, request interface{}) (response interface{}, err error) { + return true, nil +} diff --git a/vendor/github.com/go-kit/kit/auth/jwt/README.md b/vendor/github.com/go-kit/kit/auth/jwt/README.md new file mode 100644 index 000000000..d2430bd28 --- /dev/null +++ b/vendor/github.com/go-kit/kit/auth/jwt/README.md @@ -0,0 +1,122 @@ +# package auth/jwt + +`package auth/jwt` provides a set of interfaces for service authorization +through [JSON Web Tokens](https://jwt.io/). + +## Usage + +NewParser takes a key function and an expected signing method and returns an +`endpoint.Middleware`. The middleware will parse a token passed into the +context via the `jwt.JWTTokenContextKey`. If the token is valid, any claims +will be added to the context via the `jwt.JWTClaimsContextKey`. + +```go +import ( + stdjwt "github.com/dgrijalva/jwt-go" + + "github.com/go-kit/kit/auth/jwt" + "github.com/go-kit/kit/endpoint" +) + +func main() { + var exampleEndpoint endpoint.Endpoint + { + kf := func(token *stdjwt.Token) (interface{}, error) { return []byte("SigningString"), nil } + exampleEndpoint = MakeExampleEndpoint(service) + exampleEndpoint = jwt.NewParser(kf, stdjwt.SigningMethodHS256, jwt.StandardClaimsFactory)(exampleEndpoint) + } +} +``` + +NewSigner takes a JWT key ID header, the signing key, signing method, and a +claims object. It returns an `endpoint.Middleware`. The middleware will build +the token string and add it to the context via the `jwt.JWTTokenContextKey`. + +```go +import ( + stdjwt "github.com/dgrijalva/jwt-go" + + "github.com/go-kit/kit/auth/jwt" + "github.com/go-kit/kit/endpoint" +) + +func main() { + var exampleEndpoint endpoint.Endpoint + { + exampleEndpoint = grpctransport.NewClient(...).Endpoint() + exampleEndpoint = jwt.NewSigner( + "kid-header", + []byte("SigningString"), + stdjwt.SigningMethodHS256, + jwt.Claims{}, + )(exampleEndpoint) + } +} +``` + +In order for the parser and the signer to work, the authorization headers need +to be passed between the request and the context. `ToHTTPContext()`, +`FromHTTPContext()`, `ToGRPCContext()`, and `FromGRPCContext()` are given as +helpers to do this. These functions implement the correlating transport's +RequestFunc interface and can be passed as ClientBefore or ServerBefore +options. 
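+
+(In the accompanying `transport.go`, these helpers are exposed as
+`HTTPToContext()`, `ContextToHTTP()`, `GRPCToContext()`, and
+`ContextToGRPC()`.)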
+ +Example of use in a client: + +```go +import ( + stdjwt "github.com/dgrijalva/jwt-go" + + grpctransport "github.com/go-kit/kit/transport/grpc" + "github.com/go-kit/kit/auth/jwt" + "github.com/go-kit/kit/endpoint" +) + +func main() { + + options := []httptransport.ClientOption{} + var exampleEndpoint endpoint.Endpoint + { + exampleEndpoint = grpctransport.NewClient(..., grpctransport.ClientBefore(jwt.FromGRPCContext())).Endpoint() + exampleEndpoint = jwt.NewSigner( + "kid-header", + []byte("SigningString"), + stdjwt.SigningMethodHS256, + jwt.Claims{}, + )(exampleEndpoint) + } +} +``` + +Example of use in a server: + +```go +import ( + "context" + + "github.com/go-kit/kit/auth/jwt" + "github.com/go-kit/kit/log" + grpctransport "github.com/go-kit/kit/transport/grpc" +) + +func MakeGRPCServer(ctx context.Context, endpoints Endpoints, logger log.Logger) pb.ExampleServer { + options := []grpctransport.ServerOption{grpctransport.ServerErrorLogger(logger)} + + return &grpcServer{ + createUser: grpctransport.NewServer( + ctx, + endpoints.CreateUserEndpoint, + DecodeGRPCCreateUserRequest, + EncodeGRPCCreateUserResponse, + append(options, grpctransport.ServerBefore(jwt.ToGRPCContext()))..., + ), + getUser: grpctransport.NewServer( + ctx, + endpoints.GetUserEndpoint, + DecodeGRPCGetUserRequest, + EncodeGRPCGetUserResponse, + options..., + ), + } +} +``` diff --git a/vendor/github.com/go-kit/kit/auth/jwt/middleware.go b/vendor/github.com/go-kit/kit/auth/jwt/middleware.go new file mode 100644 index 000000000..0e29e6d68 --- /dev/null +++ b/vendor/github.com/go-kit/kit/auth/jwt/middleware.go @@ -0,0 +1,143 @@ +package jwt + +import ( + "context" + "errors" + + jwt "github.com/dgrijalva/jwt-go" + + "github.com/go-kit/kit/endpoint" +) + +type contextKey string + +const ( + // JWTTokenContextKey holds the key used to store a JWT Token in the + // context. + JWTTokenContextKey contextKey = "JWTToken" + + // JWTClaimsContextKey holds the key used to store the JWT Claims in the + // context. + JWTClaimsContextKey contextKey = "JWTClaims" +) + +var ( + // ErrTokenContextMissing denotes a token was not passed into the parsing + // middleware's context. + ErrTokenContextMissing = errors.New("token up for parsing was not passed through the context") + + // ErrTokenInvalid denotes a token was not able to be validated. + ErrTokenInvalid = errors.New("JWT Token was invalid") + + // ErrTokenExpired denotes a token's expire header (exp) has since passed. + ErrTokenExpired = errors.New("JWT Token is expired") + + // ErrTokenMalformed denotes a token was not formatted as a JWT token. + ErrTokenMalformed = errors.New("JWT Token is malformed") + + // ErrTokenNotActive denotes a token's not before header (nbf) is in the + // future. + ErrTokenNotActive = errors.New("token is not valid yet") + + // ErrUnexpectedSigningMethod denotes a token was signed with an unexpected + // signing method. + ErrUnexpectedSigningMethod = errors.New("unexpected signing method") +) + +// NewSigner creates a new JWT token generating middleware, specifying key ID, +// signing string, signing method and the claims you would like it to contain. +// Tokens are signed with a Key ID header (kid) which is useful for determining +// the key to use for parsing. Particularly useful for clients. 
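+//
+// A minimal sketch, reusing the illustrative key, kid and claims from the
+// package tests:
+//
+//	signer := NewSigner("kid", []byte("test_signing_key"), jwt.SigningMethodHS256, jwt.MapClaims{"user": "go-kit"})
+//	e = signer(e) // e is some endpoint.Endpoint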
+func NewSigner(kid string, key []byte, method jwt.SigningMethod, claims jwt.Claims) endpoint.Middleware { + return func(next endpoint.Endpoint) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + token := jwt.NewWithClaims(method, claims) + token.Header["kid"] = kid + + // Sign and get the complete encoded token as a string using the secret + tokenString, err := token.SignedString(key) + if err != nil { + return nil, err + } + ctx = context.WithValue(ctx, JWTTokenContextKey, tokenString) + + return next(ctx, request) + } + } +} + +// ClaimsFactory is a factory for jwt.Claims. +// Useful in NewParser middleware. +type ClaimsFactory func() jwt.Claims + +// MapClaimsFactory is a ClaimsFactory that returns +// an empty jwt.MapClaims. +func MapClaimsFactory() jwt.Claims { + return jwt.MapClaims{} +} + +// StandardClaimsFactory is a ClaimsFactory that returns +// an empty jwt.StandardClaims. +func StandardClaimsFactory() jwt.Claims { + return &jwt.StandardClaims{} +} + +// NewParser creates a new JWT token parsing middleware, specifying a +// jwt.Keyfunc interface, the signing method and the claims type to be used. NewParser +// adds the resulting claims to endpoint context or returns error on invalid token. +// Particularly useful for servers. +func NewParser(keyFunc jwt.Keyfunc, method jwt.SigningMethod, newClaims ClaimsFactory) endpoint.Middleware { + return func(next endpoint.Endpoint) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + // tokenString is stored in the context from the transport handlers. + tokenString, ok := ctx.Value(JWTTokenContextKey).(string) + if !ok { + return nil, ErrTokenContextMissing + } + + // Parse takes the token string and a function for looking up the + // key. The latter is especially useful if you use multiple keys + // for your application. The standard is to use 'kid' in the head + // of the token to identify which key to use, but the parsed token + // (head and claims) is provided to the callback, providing + // flexibility. + token, err := jwt.ParseWithClaims(tokenString, newClaims(), func(token *jwt.Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if token.Method != method { + return nil, ErrUnexpectedSigningMethod + } + + return keyFunc(token) + }) + if err != nil { + if e, ok := err.(*jwt.ValidationError); ok { + switch { + case e.Errors&jwt.ValidationErrorMalformed != 0: + // Token is malformed + return nil, ErrTokenMalformed + case e.Errors&jwt.ValidationErrorExpired != 0: + // Token is expired + return nil, ErrTokenExpired + case e.Errors&jwt.ValidationErrorNotValidYet != 0: + // Token is not active yet + return nil, ErrTokenNotActive + case e.Inner != nil: + // report e.Inner + return nil, e.Inner + } + // We have a ValidationError but have no specific Go kit error for it. + // Fall through to return original error. 
+ } + return nil, err + } + + if !token.Valid { + return nil, ErrTokenInvalid + } + + ctx = context.WithValue(ctx, JWTClaimsContextKey, token.Claims) + + return next(ctx, request) + } + } +} diff --git a/vendor/github.com/go-kit/kit/auth/jwt/middleware_test.go b/vendor/github.com/go-kit/kit/auth/jwt/middleware_test.go new file mode 100644 index 000000000..3278e13a7 --- /dev/null +++ b/vendor/github.com/go-kit/kit/auth/jwt/middleware_test.go @@ -0,0 +1,221 @@ +package jwt + +import ( + "context" + "sync" + "testing" + "time" + + "crypto/subtle" + + jwt "github.com/dgrijalva/jwt-go" + "github.com/go-kit/kit/endpoint" +) + +type customClaims struct { + MyProperty string `json:"my_property"` + jwt.StandardClaims +} + +func (c customClaims) VerifyMyProperty(p string) bool { + return subtle.ConstantTimeCompare([]byte(c.MyProperty), []byte(p)) != 0 +} + +var ( + kid = "kid" + key = []byte("test_signing_key") + myProperty = "some value" + method = jwt.SigningMethodHS256 + invalidMethod = jwt.SigningMethodRS256 + mapClaims = jwt.MapClaims{"user": "go-kit"} + standardClaims = jwt.StandardClaims{Audience: "go-kit"} + myCustomClaims = customClaims{MyProperty: myProperty, StandardClaims: standardClaims} + // Signed tokens generated at https://jwt.io/ + signedKey = "eyJhbGciOiJIUzI1NiIsImtpZCI6ImtpZCIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiZ28ta2l0In0.14M2VmYyApdSlV_LZ88ajjwuaLeIFplB8JpyNy0A19E" + standardSignedKey = "eyJhbGciOiJIUzI1NiIsImtpZCI6ImtpZCIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJnby1raXQifQ.L5ypIJjCOOv3jJ8G5SelaHvR04UJuxmcBN5QW3m_aoY" + customSignedKey = "eyJhbGciOiJIUzI1NiIsImtpZCI6ImtpZCIsInR5cCI6IkpXVCJ9.eyJteV9wcm9wZXJ0eSI6InNvbWUgdmFsdWUiLCJhdWQiOiJnby1raXQifQ.s8F-IDrV4WPJUsqr7qfDi-3GRlcKR0SRnkTeUT_U-i0" + invalidKey = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.e30.vKVCKto-Wn6rgz3vBdaZaCBGfCBDTXOENSo_X2Gq7qA" + malformedKey = "malformed.jwt.token" +) + +func signingValidator(t *testing.T, signer endpoint.Endpoint, expectedKey string) { + ctx, err := signer(context.Background(), struct{}{}) + if err != nil { + t.Fatalf("Signer returned error: %s", err) + } + + token, ok := ctx.(context.Context).Value(JWTTokenContextKey).(string) + if !ok { + t.Fatal("Token did not exist in context") + } + + if token != expectedKey { + t.Fatalf("JWT tokens did not match: expecting %s got %s", expectedKey, token) + } +} + +func TestNewSigner(t *testing.T) { + e := func(ctx context.Context, i interface{}) (interface{}, error) { return ctx, nil } + + signer := NewSigner(kid, key, method, mapClaims)(e) + signingValidator(t, signer, signedKey) + + signer = NewSigner(kid, key, method, standardClaims)(e) + signingValidator(t, signer, standardSignedKey) + + signer = NewSigner(kid, key, method, myCustomClaims)(e) + signingValidator(t, signer, customSignedKey) +} + +func TestJWTParser(t *testing.T) { + e := func(ctx context.Context, i interface{}) (interface{}, error) { return ctx, nil } + + keys := func(token *jwt.Token) (interface{}, error) { + return key, nil + } + + parser := NewParser(keys, method, MapClaimsFactory)(e) + + // No Token is passed into the parser + _, err := parser(context.Background(), struct{}{}) + if err == nil { + t.Error("Parser should have returned an error") + } + + if err != ErrTokenContextMissing { + t.Errorf("unexpected error returned, expected: %s got: %s", ErrTokenContextMissing, err) + } + + // Invalid Token is passed into the parser + ctx := context.WithValue(context.Background(), JWTTokenContextKey, invalidKey) + _, err = parser(ctx, struct{}{}) + if err == nil { + t.Error("Parser should have returned an 
error") + } + + // Invalid Method is used in the parser + badParser := NewParser(keys, invalidMethod, MapClaimsFactory)(e) + ctx = context.WithValue(context.Background(), JWTTokenContextKey, signedKey) + _, err = badParser(ctx, struct{}{}) + if err == nil { + t.Error("Parser should have returned an error") + } + + if err != ErrUnexpectedSigningMethod { + t.Errorf("unexpected error returned, expected: %s got: %s", ErrUnexpectedSigningMethod, err) + } + + // Invalid key is used in the parser + invalidKeys := func(token *jwt.Token) (interface{}, error) { + return []byte("bad"), nil + } + + badParser = NewParser(invalidKeys, method, MapClaimsFactory)(e) + ctx = context.WithValue(context.Background(), JWTTokenContextKey, signedKey) + _, err = badParser(ctx, struct{}{}) + if err == nil { + t.Error("Parser should have returned an error") + } + + // Correct token is passed into the parser + ctx = context.WithValue(context.Background(), JWTTokenContextKey, signedKey) + ctx1, err := parser(ctx, struct{}{}) + if err != nil { + t.Fatalf("Parser returned error: %s", err) + } + + cl, ok := ctx1.(context.Context).Value(JWTClaimsContextKey).(jwt.MapClaims) + if !ok { + t.Fatal("Claims were not passed into context correctly") + } + + if cl["user"] != mapClaims["user"] { + t.Fatalf("JWT Claims.user did not match: expecting %s got %s", mapClaims["user"], cl["user"]) + } + + // Test for malformed token error response + parser = NewParser(keys, method, StandardClaimsFactory)(e) + ctx = context.WithValue(context.Background(), JWTTokenContextKey, malformedKey) + ctx1, err = parser(ctx, struct{}{}) + if want, have := ErrTokenMalformed, err; want != have { + t.Fatalf("Expected %+v, got %+v", want, have) + } + + // Test for expired token error response + parser = NewParser(keys, method, StandardClaimsFactory)(e) + expired := jwt.NewWithClaims(method, jwt.StandardClaims{ExpiresAt: time.Now().Unix() - 100}) + token, err := expired.SignedString(key) + if err != nil { + t.Fatalf("Unable to Sign Token: %+v", err) + } + ctx = context.WithValue(context.Background(), JWTTokenContextKey, token) + ctx1, err = parser(ctx, struct{}{}) + if want, have := ErrTokenExpired, err; want != have { + t.Fatalf("Expected %+v, got %+v", want, have) + } + + // Test for not activated token error response + parser = NewParser(keys, method, StandardClaimsFactory)(e) + notactive := jwt.NewWithClaims(method, jwt.StandardClaims{NotBefore: time.Now().Unix() + 100}) + token, err = notactive.SignedString(key) + if err != nil { + t.Fatalf("Unable to Sign Token: %+v", err) + } + ctx = context.WithValue(context.Background(), JWTTokenContextKey, token) + ctx1, err = parser(ctx, struct{}{}) + if want, have := ErrTokenNotActive, err; want != have { + t.Fatalf("Expected %+v, got %+v", want, have) + } + + // test valid standard claims token + parser = NewParser(keys, method, StandardClaimsFactory)(e) + ctx = context.WithValue(context.Background(), JWTTokenContextKey, standardSignedKey) + ctx1, err = parser(ctx, struct{}{}) + if err != nil { + t.Fatalf("Parser returned error: %s", err) + } + stdCl, ok := ctx1.(context.Context).Value(JWTClaimsContextKey).(*jwt.StandardClaims) + if !ok { + t.Fatal("Claims were not passed into context correctly") + } + if !stdCl.VerifyAudience("go-kit", true) { + t.Fatalf("JWT jwt.StandardClaims.Audience did not match: expecting %s got %s", standardClaims.Audience, stdCl.Audience) + } + + // test valid customized claims token + parser = NewParser(keys, method, func() jwt.Claims { return &customClaims{} })(e) + ctx = 
context.WithValue(context.Background(), JWTTokenContextKey, customSignedKey) + ctx1, err = parser(ctx, struct{}{}) + if err != nil { + t.Fatalf("Parser returned error: %s", err) + } + custCl, ok := ctx1.(context.Context).Value(JWTClaimsContextKey).(*customClaims) + if !ok { + t.Fatal("Claims were not passed into context correctly") + } + if !custCl.VerifyAudience("go-kit", true) { + t.Fatalf("JWT customClaims.Audience did not match: expecting %s got %s", standardClaims.Audience, custCl.Audience) + } + if !custCl.VerifyMyProperty(myProperty) { + t.Fatalf("JWT customClaims.MyProperty did not match: expecting %s got %s", myProperty, custCl.MyProperty) + } +} + +func TestIssue562(t *testing.T) { + var ( + kf = func(token *jwt.Token) (interface{}, error) { return []byte("secret"), nil } + e = NewParser(kf, jwt.SigningMethodHS256, MapClaimsFactory)(endpoint.Nop) + key = JWTTokenContextKey + val = "eyJhbGciOiJIUzI1NiIsImtpZCI6ImtpZCIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiZ28ta2l0In0.14M2VmYyApdSlV_LZ88ajjwuaLeIFplB8JpyNy0A19E" + ctx = context.WithValue(context.Background(), key, val) + ) + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + e(ctx, struct{}{}) // fatal error: concurrent map read and map write + }() + } + wg.Wait() +} diff --git a/vendor/github.com/go-kit/kit/auth/jwt/transport.go b/vendor/github.com/go-kit/kit/auth/jwt/transport.go new file mode 100644 index 000000000..57b3aaeef --- /dev/null +++ b/vendor/github.com/go-kit/kit/auth/jwt/transport.go @@ -0,0 +1,89 @@ +package jwt + +import ( + "context" + "fmt" + stdhttp "net/http" + "strings" + + "google.golang.org/grpc/metadata" + + "github.com/go-kit/kit/transport/grpc" + "github.com/go-kit/kit/transport/http" +) + +const ( + bearer string = "bearer" + bearerFormat string = "Bearer %s" +) + +// HTTPToContext moves a JWT from request header to context. Particularly +// useful for servers. +func HTTPToContext() http.RequestFunc { + return func(ctx context.Context, r *stdhttp.Request) context.Context { + token, ok := extractTokenFromAuthHeader(r.Header.Get("Authorization")) + if !ok { + return ctx + } + + return context.WithValue(ctx, JWTTokenContextKey, token) + } +} + +// ContextToHTTP moves a JWT from context to request header. Particularly +// useful for clients. +func ContextToHTTP() http.RequestFunc { + return func(ctx context.Context, r *stdhttp.Request) context.Context { + token, ok := ctx.Value(JWTTokenContextKey).(string) + if ok { + r.Header.Add("Authorization", generateAuthHeaderFromToken(token)) + } + return ctx + } +} + +// GRPCToContext moves a JWT from grpc metadata to context. Particularly +// userful for servers. +func GRPCToContext() grpc.ServerRequestFunc { + return func(ctx context.Context, md metadata.MD) context.Context { + // capital "Key" is illegal in HTTP/2. + authHeader, ok := md["authorization"] + if !ok { + return ctx + } + + token, ok := extractTokenFromAuthHeader(authHeader[0]) + if ok { + ctx = context.WithValue(ctx, JWTTokenContextKey, token) + } + + return ctx + } +} + +// ContextToGRPC moves a JWT from context to grpc metadata. Particularly +// useful for clients. +func ContextToGRPC() grpc.ClientRequestFunc { + return func(ctx context.Context, md *metadata.MD) context.Context { + token, ok := ctx.Value(JWTTokenContextKey).(string) + if ok { + // capital "Key" is illegal in HTTP/2. 
+ (*md)["authorization"] = []string{generateAuthHeaderFromToken(token)} + } + + return ctx + } +} + +func extractTokenFromAuthHeader(val string) (token string, ok bool) { + authHeaderParts := strings.Split(val, " ") + if len(authHeaderParts) != 2 || strings.ToLower(authHeaderParts[0]) != bearer { + return "", false + } + + return authHeaderParts[1], true +} + +func generateAuthHeaderFromToken(token string) string { + return fmt.Sprintf(bearerFormat, token) +} diff --git a/vendor/github.com/go-kit/kit/auth/jwt/transport_test.go b/vendor/github.com/go-kit/kit/auth/jwt/transport_test.go new file mode 100644 index 000000000..83d0f17f8 --- /dev/null +++ b/vendor/github.com/go-kit/kit/auth/jwt/transport_test.go @@ -0,0 +1,125 @@ +package jwt + +import ( + "context" + "fmt" + "net/http" + "testing" + + "google.golang.org/grpc/metadata" +) + +func TestHTTPToContext(t *testing.T) { + reqFunc := HTTPToContext() + + // When the header doesn't exist + ctx := reqFunc(context.Background(), &http.Request{}) + + if ctx.Value(JWTTokenContextKey) != nil { + t.Error("Context shouldn't contain the encoded JWT") + } + + // Authorization header value has invalid format + header := http.Header{} + header.Set("Authorization", "no expected auth header format value") + ctx = reqFunc(context.Background(), &http.Request{Header: header}) + + if ctx.Value(JWTTokenContextKey) != nil { + t.Error("Context shouldn't contain the encoded JWT") + } + + // Authorization header is correct + header.Set("Authorization", generateAuthHeaderFromToken(signedKey)) + ctx = reqFunc(context.Background(), &http.Request{Header: header}) + + token := ctx.Value(JWTTokenContextKey).(string) + if token != signedKey { + t.Errorf("Context doesn't contain the expected encoded token value; expected: %s, got: %s", signedKey, token) + } +} + +func TestContextToHTTP(t *testing.T) { + reqFunc := ContextToHTTP() + + // No JWT Token is passed in the context + ctx := context.Background() + r := http.Request{} + reqFunc(ctx, &r) + + token := r.Header.Get("Authorization") + if token != "" { + t.Error("authorization key should not exist in metadata") + } + + // Correct JWT Token is passed in the context + ctx = context.WithValue(context.Background(), JWTTokenContextKey, signedKey) + r = http.Request{Header: http.Header{}} + reqFunc(ctx, &r) + + token = r.Header.Get("Authorization") + expected := generateAuthHeaderFromToken(signedKey) + + if token != expected { + t.Errorf("Authorization header does not contain the expected JWT token; expected %s, got %s", expected, token) + } +} + +func TestGRPCToContext(t *testing.T) { + md := metadata.MD{} + reqFunc := GRPCToContext() + + // No Authorization header is passed + ctx := reqFunc(context.Background(), md) + token := ctx.Value(JWTTokenContextKey) + if token != nil { + t.Error("Context should not contain a JWT Token") + } + + // Invalid Authorization header is passed + md["authorization"] = []string{fmt.Sprintf("%s", signedKey)} + ctx = reqFunc(context.Background(), md) + token = ctx.Value(JWTTokenContextKey) + if token != nil { + t.Error("Context should not contain a JWT Token") + } + + // Authorization header is correct + md["authorization"] = []string{fmt.Sprintf("Bearer %s", signedKey)} + ctx = reqFunc(context.Background(), md) + token, ok := ctx.Value(JWTTokenContextKey).(string) + if !ok { + t.Fatal("JWT Token not passed to context correctly") + } + + if token != signedKey { + t.Errorf("JWT tokens did not match: expecting %s got %s", signedKey, token) + } +} + +func TestContextToGRPC(t *testing.T) { + 
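+	// Round-trip check: a token placed in the context should surface in the
+	// outgoing gRPC metadata as a Bearer authorization header.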
reqFunc := ContextToGRPC() + + // No JWT Token is passed in the context + ctx := context.Background() + md := metadata.MD{} + reqFunc(ctx, &md) + + _, ok := md["authorization"] + if ok { + t.Error("authorization key should not exist in metadata") + } + + // Correct JWT Token is passed in the context + ctx = context.WithValue(context.Background(), JWTTokenContextKey, signedKey) + md = metadata.MD{} + reqFunc(ctx, &md) + + token, ok := md["authorization"] + if !ok { + t.Fatal("JWT Token not passed to metadata correctly") + } + + if token[0] != generateAuthHeaderFromToken(signedKey) { + t.Errorf("JWT tokens did not match: expecting %s got %s", signedKey, token[0]) + } +} diff --git a/vendor/github.com/go-kit/kit/circle.yml b/vendor/github.com/go-kit/kit/circle.yml new file mode 100644 index 000000000..35ace2ce8 --- /dev/null +++ b/vendor/github.com/go-kit/kit/circle.yml @@ -0,0 +1,27 @@ +machine: + pre: + - curl -sSL https://s3.amazonaws.com/circle-downloads/install-circleci-docker.sh | bash -s -- 1.10.0 + - sudo rm -rf /usr/local/go + - curl -sSL https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz | sudo tar xz -C /usr/local + services: + - docker + +dependencies: + pre: + - sudo curl -L "https://github.com/docker/compose/releases/download/1.10.0/docker-compose-linux-x86_64" -o /usr/local/bin/docker-compose + - sudo chmod +x /usr/local/bin/docker-compose + - docker-compose -f docker-compose-integration.yml up -d --force-recreate + +test: + pre: + - mkdir -p /home/ubuntu/.go_workspace/src/github.com/go-kit + - mv /home/ubuntu/kit /home/ubuntu/.go_workspace/src/github.com/go-kit + - ln -s /home/ubuntu/.go_workspace/src/github.com/go-kit/kit /home/ubuntu/kit + - go get -t github.com/go-kit/kit/... + override: + - go test -v -race -tags integration github.com/go-kit/kit/...: + environment: + ETCD_ADDR: http://localhost:2379 + CONSUL_ADDR: localhost:8500 + ZK_ADDR: localhost:2181 + EUREKA_ADDR: http://localhost:8761/eureka diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/doc.go b/vendor/github.com/go-kit/kit/circuitbreaker/doc.go new file mode 100644 index 000000000..fe7495759 --- /dev/null +++ b/vendor/github.com/go-kit/kit/circuitbreaker/doc.go @@ -0,0 +1,10 @@ +// Package circuitbreaker implements the circuit breaker pattern. +// +// Circuit breakers prevent thundering herds, and improve resiliency against +// intermittent errors. Every client-side endpoint should be wrapped in a +// circuit breaker. +// +// We provide several implementations in this package, but if you're looking +// for guidance, Gobreaker is probably the best place to start. It has a +// simple and intuitive API, and is well-tested. +package circuitbreaker diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/gobreaker.go b/vendor/github.com/go-kit/kit/circuitbreaker/gobreaker.go new file mode 100644 index 000000000..cf06304f4 --- /dev/null +++ b/vendor/github.com/go-kit/kit/circuitbreaker/gobreaker.go @@ -0,0 +1,22 @@ +package circuitbreaker + +import ( + "context" + + "github.com/sony/gobreaker" + + "github.com/go-kit/kit/endpoint" +) + +// Gobreaker returns an endpoint.Middleware that implements the circuit +// breaker pattern using the sony/gobreaker package. Only errors returned by +// the wrapped endpoint count against the circuit breaker's error count. +// +// See http://godoc.org/github.com/sony/gobreaker for more information. 
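+//
+// A minimal sketch (breaker settings and the wrapped endpoint are illustrative):
+//
+//	cb := gobreaker.NewCircuitBreaker(gobreaker.Settings{})
+//	e = Gobreaker(cb)(e) // e is some client-side endpoint.Endpoint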
+func Gobreaker(cb *gobreaker.CircuitBreaker) endpoint.Middleware { + return func(next endpoint.Endpoint) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + return cb.Execute(func() (interface{}, error) { return next(ctx, request) }) + } + } +} diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/gobreaker_test.go b/vendor/github.com/go-kit/kit/circuitbreaker/gobreaker_test.go new file mode 100644 index 000000000..b581cfe59 --- /dev/null +++ b/vendor/github.com/go-kit/kit/circuitbreaker/gobreaker_test.go @@ -0,0 +1,19 @@ +package circuitbreaker_test + +import ( + "testing" + + "github.com/sony/gobreaker" + + "github.com/go-kit/kit/circuitbreaker" +) + +func TestGobreaker(t *testing.T) { + var ( + breaker = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{})) + primeWith = 100 + shouldPass = func(n int) bool { return n <= 5 } // https://github.com/sony/gobreaker/blob/bfa846d/gobreaker.go#L76 + circuitOpenError = "circuit breaker is open" + ) + testFailingEndpoint(t, breaker, primeWith, shouldPass, 0, circuitOpenError) +} diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/handy_breaker.go b/vendor/github.com/go-kit/kit/circuitbreaker/handy_breaker.go new file mode 100644 index 000000000..a5b60be2e --- /dev/null +++ b/vendor/github.com/go-kit/kit/circuitbreaker/handy_breaker.go @@ -0,0 +1,38 @@ +package circuitbreaker + +import ( + "context" + "time" + + "github.com/streadway/handy/breaker" + + "github.com/go-kit/kit/endpoint" +) + +// HandyBreaker returns an endpoint.Middleware that implements the circuit +// breaker pattern using the streadway/handy/breaker package. Only errors +// returned by the wrapped endpoint count against the circuit breaker's error +// count. +// +// See http://godoc.org/github.com/streadway/handy/breaker for more +// information. 
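+//
+// A minimal sketch (the 5% failure ratio is illustrative):
+//
+//	e = HandyBreaker(breaker.NewBreaker(0.05))(e) // e is some client-side endpoint.Endpoint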
+func HandyBreaker(cb breaker.Breaker) endpoint.Middleware { + return func(next endpoint.Endpoint) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + if !cb.Allow() { + return nil, breaker.ErrCircuitOpen + } + + defer func(begin time.Time) { + if err == nil { + cb.Success(time.Since(begin)) + } else { + cb.Failure(time.Since(begin)) + } + }(time.Now()) + + response, err = next(ctx, request) + return + } + } +} diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/handy_breaker_test.go b/vendor/github.com/go-kit/kit/circuitbreaker/handy_breaker_test.go new file mode 100644 index 000000000..f3642a109 --- /dev/null +++ b/vendor/github.com/go-kit/kit/circuitbreaker/handy_breaker_test.go @@ -0,0 +1,20 @@ +package circuitbreaker_test + +import ( + "testing" + + handybreaker "github.com/streadway/handy/breaker" + + "github.com/go-kit/kit/circuitbreaker" +) + +func TestHandyBreaker(t *testing.T) { + var ( + failureRatio = 0.05 + breaker = circuitbreaker.HandyBreaker(handybreaker.NewBreaker(failureRatio)) + primeWith = handybreaker.DefaultMinObservations * 10 + shouldPass = func(n int) bool { return (float64(n) / float64(primeWith+n)) <= failureRatio } + openCircuitError = handybreaker.ErrCircuitOpen.Error() + ) + testFailingEndpoint(t, breaker, primeWith, shouldPass, 0, openCircuitError) +} diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/hystrix.go b/vendor/github.com/go-kit/kit/circuitbreaker/hystrix.go new file mode 100644 index 000000000..3c59ec41e --- /dev/null +++ b/vendor/github.com/go-kit/kit/circuitbreaker/hystrix.go @@ -0,0 +1,31 @@ +package circuitbreaker + +import ( + "context" + + "github.com/afex/hystrix-go/hystrix" + + "github.com/go-kit/kit/endpoint" +) + +// Hystrix returns an endpoint.Middleware that implements the circuit +// breaker pattern using the afex/hystrix-go package. +// +// When using this circuit breaker, please configure your commands separately. +// +// See https://godoc.org/github.com/afex/hystrix-go/hystrix for more +// information. 
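+//
+// A minimal sketch (the command name and its configuration are illustrative):
+//
+//	hystrix.ConfigureCommand("my-endpoint", hystrix.CommandConfig{ErrorPercentThreshold: 5})
+//	e = Hystrix("my-endpoint")(e) // e is some client-side endpoint.Endpoint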
+func Hystrix(commandName string) endpoint.Middleware { + return func(next endpoint.Endpoint) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + var resp interface{} + if err := hystrix.Do(commandName, func() (err error) { + resp, err = next(ctx, request) + return err + }, nil); err != nil { + return nil, err + } + return resp, nil + } + } +} diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/hystrix_test.go b/vendor/github.com/go-kit/kit/circuitbreaker/hystrix_test.go new file mode 100644 index 000000000..da527576e --- /dev/null +++ b/vendor/github.com/go-kit/kit/circuitbreaker/hystrix_test.go @@ -0,0 +1,40 @@ +package circuitbreaker_test + +import ( + "io/ioutil" + stdlog "log" + "testing" + "time" + + "github.com/afex/hystrix-go/hystrix" + + "github.com/go-kit/kit/circuitbreaker" +) + +func TestHystrix(t *testing.T) { + stdlog.SetOutput(ioutil.Discard) + + const ( + commandName = "my-endpoint" + errorPercent = 5 + maxConcurrent = 1000 + ) + hystrix.ConfigureCommand(commandName, hystrix.CommandConfig{ + ErrorPercentThreshold: errorPercent, + MaxConcurrentRequests: maxConcurrent, + }) + + var ( + breaker = circuitbreaker.Hystrix(commandName) + primeWith = hystrix.DefaultVolumeThreshold * 2 + shouldPass = func(n int) bool { return (float64(n) / float64(primeWith+n)) <= (float64(errorPercent-1) / 100.0) } + openCircuitError = hystrix.ErrCircuitOpen.Error() + ) + + // hystrix-go uses buffered channels to receive reports on request success/failure, + // and so is basically impossible to test deterministically. We have to make sure + // the report buffer is emptied, by injecting a sleep between each invocation. + requestDelay := 5 * time.Millisecond + + testFailingEndpoint(t, breaker, primeWith, shouldPass, requestDelay, openCircuitError) +} diff --git a/vendor/github.com/go-kit/kit/circuitbreaker/util_test.go b/vendor/github.com/go-kit/kit/circuitbreaker/util_test.go new file mode 100644 index 000000000..25123805a --- /dev/null +++ b/vendor/github.com/go-kit/kit/circuitbreaker/util_test.go @@ -0,0 +1,75 @@ +package circuitbreaker_test + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/go-kit/kit/endpoint" +) + +func testFailingEndpoint( + t *testing.T, + breaker endpoint.Middleware, + primeWith int, + shouldPass func(int) bool, + requestDelay time.Duration, + openCircuitError string, +) { + _, file, line, _ := runtime.Caller(1) + caller := fmt.Sprintf("%s:%d", filepath.Base(file), line) + + // Create a mock endpoint and wrap it with the breaker. + m := mock{} + var e endpoint.Endpoint + e = m.endpoint + e = breaker(e) + + // Prime the endpoint with successful requests. + for i := 0; i < primeWith; i++ { + if _, err := e(context.Background(), struct{}{}); err != nil { + t.Fatalf("%s: during priming, got error: %v", caller, err) + } + time.Sleep(requestDelay) + } + + // Switch the endpoint to start throwing errors. + m.err = errors.New("tragedy+disaster") + m.through = 0 + + // The first several should be allowed through and yield our error. + for i := 0; shouldPass(i); i++ { + if _, err := e(context.Background(), struct{}{}); err != m.err { + t.Fatalf("%s: want %v, have %v", caller, m.err, err) + } + time.Sleep(requestDelay) + } + through := m.through + + // But the rest should be blocked by an open circuit. 
+ for i := 0; i < 10; i++ { + if _, err := e(context.Background(), struct{}{}); err.Error() != openCircuitError { + t.Fatalf("%s: want %q, have %q", caller, openCircuitError, err.Error()) + } + time.Sleep(requestDelay) + } + + // Make sure none of those got through. + if want, have := through, m.through; want != have { + t.Errorf("%s: want %d, have %d", caller, want, have) + } +} + +type mock struct { + through int + err error +} + +func (m *mock) endpoint(context.Context, interface{}) (interface{}, error) { + m.through++ + return struct{}{}, m.err +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/.ignore b/vendor/github.com/go-kit/kit/cmd/kitgen/.ignore new file mode 100644 index 000000000..747f955ca --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/.ignore @@ -0,0 +1 @@ +testdata/*/*/ diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/arg.go b/vendor/github.com/go-kit/kit/cmd/kitgen/arg.go new file mode 100644 index 000000000..bcf4e0a5d --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/arg.go @@ -0,0 +1,36 @@ +package main + +import "go/ast" + +type arg struct { + name, asField *ast.Ident + typ ast.Expr +} + +func (a arg) chooseName(scope *ast.Scope) *ast.Ident { + if a.name == nil || scope.Lookup(a.name.Name) != nil { + return inventName(a.typ, scope) + } + return a.name +} + +func (a arg) field(scope *ast.Scope) *ast.Field { + return &ast.Field{ + Names: []*ast.Ident{a.chooseName(scope)}, + Type: a.typ, + } +} + +func (a arg) result() *ast.Field { + return &ast.Field{ + Names: nil, + Type: a.typ, + } +} + +func (a arg) exported() *ast.Field { + return &ast.Field{ + Names: []*ast.Ident{id(export(a.asField.Name))}, + Type: a.typ, + } +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/ast_helpers.go b/vendor/github.com/go-kit/kit/cmd/kitgen/ast_helpers.go new file mode 100644 index 000000000..ab7c277db --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/ast_helpers.go @@ -0,0 +1,208 @@ +package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "strings" + "unicode" +) + +func export(s string) string { + return strings.Title(s) +} + +func unexport(s string) string { + first := true + return strings.Map(func(r rune) rune { + if first { + first = false + return unicode.ToLower(r) + } + return r + }, s) +} + +func inventName(t ast.Expr, scope *ast.Scope) *ast.Ident { + n := baseName(t) + for try := 0; ; try++ { + nstr := pickName(n, try) + obj := ast.NewObj(ast.Var, nstr) + if alt := scope.Insert(obj); alt == nil { + return ast.NewIdent(nstr) + } + } +} + +func baseName(t ast.Expr) string { + switch tt := t.(type) { + default: + panic(fmt.Sprintf("don't know how to choose a base name for %T (%[1]v)", tt)) + case *ast.ArrayType: + return "slice" + case *ast.Ident: + return tt.Name + case *ast.SelectorExpr: + return tt.Sel.Name + } +} + +func pickName(base string, idx int) string { + if idx == 0 { + switch base { + default: + return strings.Split(base, "")[0] + case "Context": + return "ctx" + case "error": + return "err" + } + } + return fmt.Sprintf("%s%d", base, idx) +} + +func scopeWith(names ...string) *ast.Scope { + scope := ast.NewScope(nil) + for _, name := range names { + scope.Insert(ast.NewObj(ast.Var, name)) + } + return scope +} + +type visitFn func(ast.Node, func(ast.Node)) + +func (fn visitFn) Visit(node ast.Node, r func(ast.Node)) Visitor { + fn(node, r) + return fn +} + +func replaceIdent(src ast.Node, named string, with ast.Node) ast.Node { + r := visitFn(func(node ast.Node, replaceWith func(ast.Node)) { + switch id := 
node.(type) { + case *ast.Ident: + if id.Name == named { + replaceWith(with) + } + } + }) + return WalkReplace(r, src) +} + +func replaceLit(src ast.Node, from, to string) ast.Node { + r := visitFn(func(node ast.Node, replaceWith func(ast.Node)) { + switch lit := node.(type) { + case *ast.BasicLit: + if lit.Value == from { + replaceWith(&ast.BasicLit{Value: to}) + } + } + }) + return WalkReplace(r, src) +} + +func fullAST() *ast.File { + full, err := ASTTemplates.Open("full.go") + if err != nil { + panic(err) + } + f, err := parser.ParseFile(token.NewFileSet(), "templates/full.go", full, parser.DeclarationErrors) + if err != nil { + panic(err) + } + return f +} + +func fetchImports() []*ast.ImportSpec { + return fullAST().Imports +} + +func fetchFuncDecl(name string) *ast.FuncDecl { + f := fullAST() + for _, decl := range f.Decls { + if f, ok := decl.(*ast.FuncDecl); ok && f.Name.Name == name { + return f + } + } + panic(fmt.Errorf("No function called %q in 'templates/full.go'", name)) +} + +func id(name string) *ast.Ident { + return ast.NewIdent(name) +} + +func sel(ids ...*ast.Ident) ast.Expr { + switch len(ids) { + default: + return &ast.SelectorExpr{ + X: sel(ids[:len(ids)-1]...), + Sel: ids[len(ids)-1], + } + case 1: + return ids[0] + case 0: + panic("zero ids to sel()") + } +} + +func typeField(t ast.Expr) *ast.Field { + return &ast.Field{Type: t} +} + +func field(n *ast.Ident, t ast.Expr) *ast.Field { + return &ast.Field{ + Names: []*ast.Ident{n}, + Type: t, + } +} + +func fieldList(list ...*ast.Field) *ast.FieldList { + return &ast.FieldList{List: list} +} + +func mappedFieldList(fn func(arg) *ast.Field, args ...arg) *ast.FieldList { + fl := &ast.FieldList{List: []*ast.Field{}} + for _, a := range args { + fl.List = append(fl.List, fn(a)) + } + return fl +} + +func blockStmt(stmts ...ast.Stmt) *ast.BlockStmt { + return &ast.BlockStmt{ + List: stmts, + } +} + +func structDecl(name *ast.Ident, fields *ast.FieldList) ast.Decl { + return typeDecl(&ast.TypeSpec{ + Name: name, + Type: &ast.StructType{ + Fields: fields, + }, + }) +} + +func typeDecl(ts *ast.TypeSpec) ast.Decl { + return &ast.GenDecl{ + Tok: token.TYPE, + Specs: []ast.Spec{ts}, + } +} + +func pasteStmts(body *ast.BlockStmt, idx int, stmts []ast.Stmt) { + list := body.List + prefix := list[:idx] + suffix := make([]ast.Stmt, len(list)-idx-1) + copy(suffix, list[idx+1:]) + + body.List = append(append(prefix, stmts...), suffix...) 
+} + +func importFor(is *ast.ImportSpec) *ast.GenDecl { + return &ast.GenDecl{Tok: token.IMPORT, Specs: []ast.Spec{is}} +} + +func importSpec(path string) *ast.ImportSpec { + return &ast.ImportSpec{Path: &ast.BasicLit{Kind: token.STRING, Value: `"` + path + `"`}} +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/ast_templates.go b/vendor/github.com/go-kit/kit/cmd/kitgen/ast_templates.go new file mode 100644 index 000000000..13aa87c20 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/ast_templates.go @@ -0,0 +1,11 @@ +// This file was automatically generated based on the contents of *.tmpl +// If you need to update this file, change the contents of those files +// (or add new ones) and run 'go generate' + +package main + +import "golang.org/x/tools/godoc/vfs/mapfs" + +var ASTTemplates = mapfs.New(map[string]string{ + `full.go`: "package foo\n\nimport (\n \"context\"\n \"encoding/json\"\n \"errors\"\n \"net/http\"\n\n \"github.com/go-kit/kit/endpoint\"\n httptransport \"github.com/go-kit/kit/transport/http\"\n)\n\ntype ExampleService struct {\n}\n\ntype ExampleRequest struct {\n I int\n S string\n}\ntype ExampleResponse struct {\n S string\n Err error\n}\n\ntype Endpoints struct {\n ExampleEndpoint endpoint.Endpoint\n}\n\nfunc (f ExampleService) ExampleEndpoint(ctx context.Context, i int, s string) (string, error) {\n panic(errors.New(\"not implemented\"))\n}\n\nfunc makeExampleEndpoint(f ExampleService) endpoint.Endpoint {\n return func(ctx context.Context, request interface{}) (interface{}, error) {\n req := request.(ExampleRequest)\n s, err := f.ExampleEndpoint(ctx, req.I, req.S)\n return ExampleResponse{S: s, Err: err}, nil\n }\n}\n\nfunc inlineHandlerBuilder(m *http.ServeMux, endpoints Endpoints) {\n m.Handle(\"/bar\", httptransport.NewServer(endpoints.ExampleEndpoint, DecodeExampleRequest, EncodeExampleResponse))\n}\n\nfunc NewHTTPHandler(endpoints Endpoints) http.Handler {\n m := http.NewServeMux()\n inlineHandlerBuilder(m, endpoints)\n return m\n}\n\nfunc DecodeExampleRequest(_ context.Context, r *http.Request) (interface{}, error) {\n var req ExampleRequest\n err := json.NewDecoder(r.Body).Decode(&req)\n return req, err\n}\n\nfunc EncodeExampleResponse(_ context.Context, w http.ResponseWriter, response interface{}) error {\n w.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n return json.NewEncoder(w).Encode(response)\n}\n", +}) diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/deflayout.go b/vendor/github.com/go-kit/kit/cmd/kitgen/deflayout.go new file mode 100644 index 000000000..27e2fec37 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/deflayout.go @@ -0,0 +1,63 @@ +package main + +import "path/filepath" + +type deflayout struct { + targetDir string +} + +func (l deflayout) packagePath(sub string) string { + return filepath.Join(l.targetDir, sub) +} + +func (l deflayout) transformAST(ctx *sourceContext) (files, error) { + out := make(outputTree) + + endpoints := out.addFile("endpoints/endpoints.go", "endpoints") + http := out.addFile("http/http.go", "http") + service := out.addFile("service/service.go", "service") + + addImports(endpoints, ctx) + addImports(http, ctx) + addImports(service, ctx) + + for _, typ := range ctx.types { + addType(service, typ) + } + + for _, iface := range ctx.interfaces { //only one... 
+ addStubStruct(service, iface) + + for _, meth := range iface.methods { + addMethod(service, iface, meth) + addRequestStruct(endpoints, meth) + addResponseStruct(endpoints, meth) + addEndpointMaker(endpoints, iface, meth) + } + + addEndpointsStruct(endpoints, iface) + addHTTPHandler(http, iface) + + for _, meth := range iface.methods { + addDecoder(http, meth) + addEncoder(http, meth) + } + + for name := range out { + out[name] = selectify(out[name], "service", iface.stubName().Name, l.packagePath("service")) + for _, meth := range iface.methods { + out[name] = selectify(out[name], "endpoints", meth.requestStructName().Name, l.packagePath("endpoints")) + } + } + } + + for name := range out { + out[name] = selectify(out[name], "endpoints", "Endpoints", l.packagePath("endpoints")) + + for _, typ := range ctx.types { + out[name] = selectify(out[name], "service", typ.Name.Name, l.packagePath("service")) + } + } + + return formatNodes(out) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/flatlayout.go b/vendor/github.com/go-kit/kit/cmd/kitgen/flatlayout.go new file mode 100644 index 000000000..fedffa4b8 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/flatlayout.go @@ -0,0 +1,39 @@ +package main + +import "go/ast" + +type flat struct{} + +func (f flat) transformAST(ctx *sourceContext) (files, error) { + root := &ast.File{ + Name: ctx.pkg, + Decls: []ast.Decl{}, + } + + addImports(root, ctx) + + for _, typ := range ctx.types { + addType(root, typ) + } + + for _, iface := range ctx.interfaces { //only one... + addStubStruct(root, iface) + + for _, meth := range iface.methods { + addMethod(root, iface, meth) + addRequestStruct(root, meth) + addResponseStruct(root, meth) + addEndpointMaker(root, iface, meth) + } + + addEndpointsStruct(root, iface) + addHTTPHandler(root, iface) + + for _, meth := range iface.methods { + addDecoder(root, meth) + addEncoder(root, meth) + } + } + + return formatNodes(outputTree{"gokit.go": root}) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/interface.go b/vendor/github.com/go-kit/kit/cmd/kitgen/interface.go new file mode 100644 index 000000000..0c984dfca --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/interface.go @@ -0,0 +1,70 @@ +package main + +import "go/ast" + +// because "interface" is a keyword... +type iface struct { + name, stubname, rcvrName *ast.Ident + methods []method +} + +func (i iface) stubName() *ast.Ident { + return i.stubname +} + +func (i iface) stubStructDecl() ast.Decl { + return structDecl(i.stubName(), &ast.FieldList{}) +} + +func (i iface) endpointsStruct() ast.Decl { + fl := &ast.FieldList{} + for _, m := range i.methods { + fl.List = append(fl.List, &ast.Field{Names: []*ast.Ident{m.name}, Type: sel(id("endpoint"), id("Endpoint"))}) + } + return structDecl(id("Endpoints"), fl) +} + +func (i iface) httpHandler() ast.Decl { + handlerFn := fetchFuncDecl("NewHTTPHandler") + + // does this "inlining" process merit a helper akin to replaceIdent? 
+ handleCalls := []ast.Stmt{} + for _, m := range i.methods { + handleCall := fetchFuncDecl("inlineHandlerBuilder").Body.List[0].(*ast.ExprStmt).X.(*ast.CallExpr) + + handleCall = replaceLit(handleCall, `"/bar"`, `"`+m.pathName()+`"`).(*ast.CallExpr) + handleCall = replaceIdent(handleCall, "ExampleEndpoint", m.name).(*ast.CallExpr) + handleCall = replaceIdent(handleCall, "DecodeExampleRequest", m.decodeFuncName()).(*ast.CallExpr) + handleCall = replaceIdent(handleCall, "EncodeExampleResponse", m.encodeFuncName()).(*ast.CallExpr) + + handleCalls = append(handleCalls, &ast.ExprStmt{X: handleCall}) + } + + pasteStmts(handlerFn.Body, 1, handleCalls) + + return handlerFn +} + +func (i iface) reciever() *ast.Field { + return field(i.receiverName(), i.stubName()) +} + +func (i iface) receiverName() *ast.Ident { + if i.rcvrName != nil { + return i.rcvrName + } + scope := ast.NewScope(nil) + for _, meth := range i.methods { + for _, arg := range meth.params { + if arg.name != nil { + scope.Insert(ast.NewObj(ast.Var, arg.name.Name)) + } + } + for _, arg := range meth.results { + if arg.name != nil { + scope.Insert(ast.NewObj(ast.Var, arg.name.Name)) + } + } + } + return id(unexport(inventName(i.name, scope).Name)) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/main.go b/vendor/github.com/go-kit/kit/cmd/kitgen/main.go new file mode 100644 index 000000000..fdfd1fb9c --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/main.go @@ -0,0 +1,156 @@ +package main + +import ( + "flag" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "log" + "os" + "path" + + "github.com/pkg/errors" +) + +// go get github.com/nyarly/inlinefiles +//go:generate inlinefiles --package=main --vfs=ASTTemplates ./templates ast_templates.go + +func usage() string { + return fmt.Sprintf("Usage: %s (try -h)", os.Args[0]) +} + +var ( + help = flag.Bool("h", false, "print this help") + layoutkind = flag.String("repo-layout", "default", "default, flat...") + outdirrel = flag.String("target-dir", ".", "base directory to emit into") + //contextOmittable = flag.Bool("allow-no-context", false, "allow service methods to omit context parameter") +) + +func helpText() { + fmt.Println("USAGE") + fmt.Println(" kitgen [flags] path/to/service.go") + fmt.Println("") + fmt.Println("FLAGS") + flag.PrintDefaults() +} + +func main() { + flag.Parse() + + if *help { + helpText() + os.Exit(0) + } + + outdir := *outdirrel + if !path.IsAbs(*outdirrel) { + wd, err := os.Getwd() + if err != nil { + log.Fatalf("error getting current working directory: %v", err) + } + outdir = path.Join(wd, *outdirrel) + } + + var layout layout + switch *layoutkind { + default: + log.Fatalf("Unrecognized layout kind: %q - try 'default' or 'flat'", *layoutkind) + case "default": + gopath := getGopath() + importBase, err := importPath(outdir, gopath) + if err != nil { + log.Fatal(err) + } + layout = deflayout{targetDir: importBase} + case "flat": + layout = flat{} + } + + if len(os.Args) < 2 { + log.Fatal(usage()) + } + filename := flag.Arg(0) + file, err := os.Open(filename) + if err != nil { + log.Fatalf("error while opening %q: %v", filename, err) + } + + tree, err := process(filename, file, layout) + if err != nil { + log.Fatal(err) + } + + err = splat(outdir, tree) + if err != nil { + log.Fatal(err) + } +} + +func process(filename string, source io.Reader, layout layout) (files, error) { + f, err := parseFile(filename, source) + if err != nil { + return nil, errors.Wrapf(err, "parsing input %q", filename) + } + + context, err := extractContext(f) + if err 
!= nil { + return nil, errors.Wrapf(err, "examining input file %q", filename) + } + + tree, err := layout.transformAST(context) + if err != nil { + return nil, errors.Wrapf(err, "generating AST") + } + return tree, nil +} + +/* + buf, err := formatNode(dest) + if err != nil { + return nil, errors.Wrapf(err, "formatting") + } + return buf, nil +} +*/ + +func parseFile(fname string, source io.Reader) (ast.Node, error) { + f, err := parser.ParseFile(token.NewFileSet(), fname, source, parser.DeclarationErrors) + if err != nil { + return nil, err + } + return f, nil +} + +func extractContext(f ast.Node) (*sourceContext, error) { + context := &sourceContext{} + visitor := &parseVisitor{src: context} + + ast.Walk(visitor, f) + + return context, context.validate() +} + +func splat(dir string, tree files) error { + for fn, buf := range tree { + if err := splatFile(path.Join(dir, fn), buf); err != nil { + return err + } + } + return nil +} + +func splatFile(target string, buf io.Reader) error { + err := os.MkdirAll(path.Dir(target), os.ModePerm) + if err != nil { + return errors.Wrapf(err, "Couldn't create directory for %q", target) + } + f, err := os.Create(target) + if err != nil { + return errors.Wrapf(err, "Couldn't create file %q", target) + } + defer f.Close() + _, err = io.Copy(f, buf) + return errors.Wrapf(err, "Error writing data to file %q", target) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/main_test.go b/vendor/github.com/go-kit/kit/cmd/kitgen/main_test.go new file mode 100644 index 000000000..4ef5013a8 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/main_test.go @@ -0,0 +1,113 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "os/exec" + "path/filepath" + "strings" + "testing" +) + +var update = flag.Bool("update", false, "update golden files") + +func TestProcess(t *testing.T) { + cases, err := filepath.Glob("testdata/*") + if err != nil { + t.Fatal(err) + } + + laidout := func(t *testing.T, inpath, dir, kind string, layout layout, in []byte) { + t.Run(kind, func(t *testing.T) { + targetDir := filepath.Join(dir, kind) + tree, err := process(inpath, bytes.NewBuffer(in), layout) + if err != nil { + t.Fatal(inpath, fmt.Sprintf("%+#v", err)) + } + + if *update { + err := splat(targetDir, tree) + if err != nil { + t.Fatal(kind, err) + } + // otherwise we need to do some tomfoolery with resetting buffers + // I'm willing to just run the tests again - besides, we shouldn't be + // regerating the golden files that often + t.Error("Updated outputs - DID NOT COMPARE! (run tests again without -update)") + return + } + + for filename, buf := range tree { + actual, err := ioutil.ReadAll(buf) + if err != nil { + t.Fatal(kind, filename, err) + } + + outpath := filepath.Join(targetDir, filename) + + expected, err := ioutil.ReadFile(outpath) + if err != nil { + t.Fatal(outpath, err) + } + + if !bytes.Equal(expected, actual) { + name := kind + filename + name = strings.Replace(name, "/", "-", -1) + + errfile, err := ioutil.TempFile("", name) + if err != nil { + t.Fatal("opening tempfile for output", err) + } + io.WriteString(errfile, string(actual)) + + diffCmd := exec.Command("diff", outpath, errfile.Name()) + diffOut, _ := diffCmd.Output() + t.Log(string(diffOut)) + t.Errorf("Processing output didn't match %q. 
Results recorded in %q.", outpath, errfile.Name()) + } + } + + if !t.Failed() { + build := exec.Command("go", "build", "./...") + build.Dir = targetDir + out, err := build.CombinedOutput() + if err != nil { + t.Fatalf("Cannot build output: %v\n%s", err, string(out)) + } + } + }) + + } + + testcase := func(dir string) { + name := filepath.Base(dir) + t.Run(name, func(t *testing.T) { + inpath := filepath.Join(dir, "in.go") + + in, err := ioutil.ReadFile(inpath) + if err != nil { + t.Fatal(inpath, err) + } + laidout(t, inpath, dir, "flat", flat{}, in) + laidout(t, inpath, dir, "default", deflayout{ + targetDir: filepath.Join("github.com/go-kit/kit/cmd/kitgen", dir, "default"), + }, in) + }) + } + + for _, dir := range cases { + testcase(dir) + } +} + +func TestTemplatesBuild(t *testing.T) { + build := exec.Command("go", "build", "./...") + build.Dir = "templates" + out, err := build.CombinedOutput() + if err != nil { + t.Fatal(err, "\n", string(out)) + } +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/method.go b/vendor/github.com/go-kit/kit/cmd/kitgen/method.go new file mode 100644 index 000000000..f7a6da1b0 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/method.go @@ -0,0 +1,220 @@ +package main + +import ( + "go/ast" + "go/token" + "strings" +) + +type method struct { + name *ast.Ident + params []arg + results []arg + structsResolved bool +} + +func (m method) definition(ifc iface) ast.Decl { + notImpl := fetchFuncDecl("ExampleEndpoint") + + notImpl.Name = m.name + notImpl.Recv = fieldList(ifc.reciever()) + scope := scopeWith(notImpl.Recv.List[0].Names[0].Name) + notImpl.Type.Params = m.funcParams(scope) + notImpl.Type.Results = m.funcResults() + + return notImpl +} + +func (m method) endpointMaker(ifc iface) ast.Decl { + endpointFn := fetchFuncDecl("makeExampleEndpoint") + scope := scopeWith("ctx", "req", ifc.receiverName().Name) + + anonFunc := endpointFn.Body.List[0].(*ast.ReturnStmt).Results[0].(*ast.FuncLit) + if !m.hasContext() { + // strip context param from endpoint function + anonFunc.Type.Params.List = anonFunc.Type.Params.List[1:] + } + + anonFunc = replaceIdent(anonFunc, "ExampleRequest", m.requestStructName()).(*ast.FuncLit) + callMethod := m.called(ifc, scope, "ctx", "req") + anonFunc.Body.List[1] = callMethod + anonFunc.Body.List[2].(*ast.ReturnStmt).Results[0] = m.wrapResult(callMethod.Lhs) + + endpointFn.Body.List[0].(*ast.ReturnStmt).Results[0] = anonFunc + endpointFn.Name = m.endpointMakerName() + endpointFn.Type.Params = fieldList(ifc.reciever()) + endpointFn.Type.Results = fieldList(typeField(sel(id("endpoint"), id("Endpoint")))) + return endpointFn +} + +func (m method) pathName() string { + return "/" + strings.ToLower(m.name.Name) +} + +func (m method) encodeFuncName() *ast.Ident { + return id("Encode" + m.name.Name + "Response") +} + +func (m method) decodeFuncName() *ast.Ident { + return id("Decode" + m.name.Name + "Request") +} + +func (m method) resultNames(scope *ast.Scope) []*ast.Ident { + ids := []*ast.Ident{} + for _, rz := range m.results { + ids = append(ids, rz.chooseName(scope)) + } + return ids +} + +func (m method) called(ifc iface, scope *ast.Scope, ctxName, spreadStruct string) *ast.AssignStmt { + m.resolveStructNames() + + resNamesExpr := []ast.Expr{} + for _, r := range m.resultNames(scope) { + resNamesExpr = append(resNamesExpr, ast.Expr(r)) + } + + arglist := []ast.Expr{} + if m.hasContext() { + arglist = append(arglist, id(ctxName)) + } + ssid := id(spreadStruct) + for _, f := range m.requestStructFields().List { + arglist = 
append(arglist, sel(ssid, f.Names[0])) + } + + return &ast.AssignStmt{ + Lhs: resNamesExpr, + Tok: token.DEFINE, + Rhs: []ast.Expr{ + &ast.CallExpr{ + Fun: sel(ifc.receiverName(), m.name), + Args: arglist, + }, + }, + } +} + +func (m method) wrapResult(results []ast.Expr) ast.Expr { + kvs := []ast.Expr{} + m.resolveStructNames() + + for i, a := range m.results { + kvs = append(kvs, &ast.KeyValueExpr{ + Key: ast.NewIdent(export(a.asField.Name)), + Value: results[i], + }) + } + return &ast.CompositeLit{ + Type: m.responseStructName(), + Elts: kvs, + } +} + +func (m method) resolveStructNames() { + if m.structsResolved { + return + } + m.structsResolved = true + scope := ast.NewScope(nil) + for i, p := range m.params { + p.asField = p.chooseName(scope) + m.params[i] = p + } + scope = ast.NewScope(nil) + for i, r := range m.results { + r.asField = r.chooseName(scope) + m.results[i] = r + } +} + +func (m method) decoderFunc() ast.Decl { + fn := fetchFuncDecl("DecodeExampleRequest") + fn.Name = m.decodeFuncName() + fn = replaceIdent(fn, "ExampleRequest", m.requestStructName()).(*ast.FuncDecl) + return fn +} + +func (m method) encoderFunc() ast.Decl { + fn := fetchFuncDecl("EncodeExampleResponse") + fn.Name = m.encodeFuncName() + return fn +} + +func (m method) endpointMakerName() *ast.Ident { + return id("Make" + m.name.Name + "Endpoint") +} + +func (m method) requestStruct() ast.Decl { + m.resolveStructNames() + return structDecl(m.requestStructName(), m.requestStructFields()) +} + +func (m method) responseStruct() ast.Decl { + m.resolveStructNames() + return structDecl(m.responseStructName(), m.responseStructFields()) +} + +func (m method) hasContext() bool { + if len(m.params) < 1 { + return false + } + carg := m.params[0].typ + // ugh. this is maybe okay for the one-off, but a general case for matching + // types would be helpful + if sel, is := carg.(*ast.SelectorExpr); is && sel.Sel.Name == "Context" { + if id, is := sel.X.(*ast.Ident); is && id.Name == "context" { + return true + } + } + return false +} + +func (m method) nonContextParams() []arg { + if m.hasContext() { + return m.params[1:] + } + return m.params +} + +func (m method) funcParams(scope *ast.Scope) *ast.FieldList { + parms := &ast.FieldList{} + if m.hasContext() { + parms.List = []*ast.Field{{ + Names: []*ast.Ident{ast.NewIdent("ctx")}, + Type: sel(id("context"), id("Context")), + }} + scope.Insert(ast.NewObj(ast.Var, "ctx")) + } + parms.List = append(parms.List, mappedFieldList(func(a arg) *ast.Field { + return a.field(scope) + }, m.nonContextParams()...).List...) + return parms +} + +func (m method) funcResults() *ast.FieldList { + return mappedFieldList(func(a arg) *ast.Field { + return a.result() + }, m.results...) +} + +func (m method) requestStructName() *ast.Ident { + return id(export(m.name.Name) + "Request") +} + +func (m method) requestStructFields() *ast.FieldList { + return mappedFieldList(func(a arg) *ast.Field { + return a.exported() + }, m.nonContextParams()...) +} + +func (m method) responseStructName() *ast.Ident { + return id(export(m.name.Name) + "Response") +} + +func (m method) responseStructFields() *ast.FieldList { + return mappedFieldList(func(a arg) *ast.Field { + return a.exported() + }, m.results...) 
+} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/parsevisitor.go b/vendor/github.com/go-kit/kit/cmd/kitgen/parsevisitor.go new file mode 100644 index 000000000..aa5131343 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/parsevisitor.go @@ -0,0 +1,178 @@ +package main + +import ( + "go/ast" +) + +type ( + parseVisitor struct { + src *sourceContext + } + + typeSpecVisitor struct { + src *sourceContext + node *ast.TypeSpec + iface *iface + name *ast.Ident + } + + interfaceTypeVisitor struct { + node *ast.TypeSpec + ts *typeSpecVisitor + methods []method + } + + methodVisitor struct { + depth int + node *ast.TypeSpec + list *[]method + name *ast.Ident + params, results *[]arg + isMethod bool + } + + argListVisitor struct { + list *[]arg + } + + argVisitor struct { + node *ast.TypeSpec + parts []ast.Expr + list *[]arg + } +) + +func (v *parseVisitor) Visit(n ast.Node) ast.Visitor { + switch rn := n.(type) { + default: + return v + case *ast.File: + v.src.pkg = rn.Name + return v + case *ast.ImportSpec: + v.src.imports = append(v.src.imports, rn) + return nil + + case *ast.TypeSpec: + switch rn.Type.(type) { + default: + v.src.types = append(v.src.types, rn) + case *ast.InterfaceType: + // can't output interfaces + // because they'd conflict with our implementations + } + return &typeSpecVisitor{src: v.src, node: rn} + } +} + +/* +package foo + +type FooService interface { + Bar(ctx context.Context, i int, s string) (string, error) +} +*/ + +func (v *typeSpecVisitor) Visit(n ast.Node) ast.Visitor { + switch rn := n.(type) { + default: + return v + case *ast.Ident: + if v.name == nil { + v.name = rn + } + return v + case *ast.InterfaceType: + return &interfaceTypeVisitor{ts: v, methods: []method{}} + case nil: + if v.iface != nil { + v.iface.name = v.name + sn := *v.name + v.iface.stubname = &sn + v.iface.stubname.Name = v.name.String() + v.src.interfaces = append(v.src.interfaces, *v.iface) + } + return nil + } +} + +func (v *interfaceTypeVisitor) Visit(n ast.Node) ast.Visitor { + switch n.(type) { + default: + return v + case *ast.Field: + return &methodVisitor{list: &v.methods} + case nil: + v.ts.iface = &iface{methods: v.methods} + return nil + } +} + +func (v *methodVisitor) Visit(n ast.Node) ast.Visitor { + switch rn := n.(type) { + default: + v.depth++ + return v + case *ast.Ident: + if rn.IsExported() { + v.name = rn + } + v.depth++ + return v + case *ast.FuncType: + v.depth++ + v.isMethod = true + return v + case *ast.FieldList: + if v.params == nil { + v.params = &[]arg{} + return &argListVisitor{list: v.params} + } + if v.results == nil { + v.results = &[]arg{} + } + return &argListVisitor{list: v.results} + case nil: + v.depth-- + if v.depth == 0 && v.isMethod && v.name != nil { + *v.list = append(*v.list, method{name: v.name, params: *v.params, results: *v.results}) + } + return nil + } +} + +func (v *argListVisitor) Visit(n ast.Node) ast.Visitor { + switch n.(type) { + default: + return nil + case *ast.Field: + return &argVisitor{list: v.list} + } +} + +func (v *argVisitor) Visit(n ast.Node) ast.Visitor { + switch t := n.(type) { + case *ast.CommentGroup, *ast.BasicLit: + return nil + case *ast.Ident: //Expr -> everything, but clarity + if t.Name != "_" { + v.parts = append(v.parts, t) + } + case ast.Expr: + v.parts = append(v.parts, t) + case nil: + names := v.parts[:len(v.parts)-1] + tp := v.parts[len(v.parts)-1] + if len(names) == 0 { + *v.list = append(*v.list, arg{typ: tp}) + return nil + } + for _, n := range names { + *v.list = append(*v.list, arg{ + name: 
n.(*ast.Ident), + typ: tp, + }) + } + } + return nil +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/path_test.go b/vendor/github.com/go-kit/kit/cmd/kitgen/path_test.go new file mode 100644 index 000000000..371ded1d1 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/path_test.go @@ -0,0 +1,43 @@ +package main + +import ( + "fmt" + "strings" + "testing" +) + +func TestImportPath(t *testing.T) { + testcase := func(gopath, targetpath, expected string) { + t.Run(fmt.Sprintf("%q + %q", gopath, targetpath), func(t *testing.T) { + actual, err := importPath(targetpath, gopath) + if err != nil { + t.Fatalf("Expected no error, got %q", err) + } + if actual != expected { + t.Errorf("Expected %q, got %q", expected, actual) + } + }) + } + + testcase("/gopath/", "/gopath/src/somewhere", "somewhere") + testcase("/gopath", "/gopath/src/somewhere", "somewhere") + testcase("/gopath:/other", "/gopath/src/somewhere", "somewhere") + testcase("/other:/gopath/", "/gopath/src/somewhere", "somewhere") +} + +func TestImportPathSadpath(t *testing.T) { + testcase := func(gopath, targetpath, expected string) { + t.Run(fmt.Sprintf("%q + %q", gopath, targetpath), func(t *testing.T) { + actual, err := importPath(targetpath, gopath) + if actual != "" { + t.Errorf("Expected empty path, got %q", actual) + } + if strings.Index(err.Error(), expected) == -1 { + t.Errorf("Expected %q to include %q", err, expected) + } + }) + } + + testcase("", "/gopath/src/somewhere", "is not in") + testcase("", "./somewhere", "not an absolute") +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/replacewalk.go b/vendor/github.com/go-kit/kit/cmd/kitgen/replacewalk.go new file mode 100644 index 000000000..f6b70dcd9 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/replacewalk.go @@ -0,0 +1,759 @@ +package main + +import ( + "fmt" + "go/ast" +) + +// A Visitor's Visit method is invoked for each node encountered by walkToReplace. +// If the result visitor w is not nil, walkToReplace visits each of the children +// of node with the visitor w, followed by a call of w.Visit(nil). +type Visitor interface { + Visit(node ast.Node, replace func(ast.Node)) (w Visitor) +} + +// Helper functions for common node lists. They may be empty. + +func walkIdentList(v Visitor, list []*ast.Ident) { + for i, x := range list { + walkToReplace(v, x, func(r ast.Node) { + list[i] = r.(*ast.Ident) + }) + } +} + +func walkExprList(v Visitor, list []ast.Expr) { + for i, x := range list { + walkToReplace(v, x, func(r ast.Node) { + list[i] = r.(ast.Expr) + }) + } +} + +func walkStmtList(v Visitor, list []ast.Stmt) { + for i, x := range list { + walkToReplace(v, x, func(r ast.Node) { + list[i] = r.(ast.Stmt) + }) + } +} + +func walkDeclList(v Visitor, list []ast.Decl) { + for i, x := range list { + walkToReplace(v, x, func(r ast.Node) { + list[i] = r.(ast.Decl) + }) + } +} + +// WalkToReplace traverses an AST in depth-first order: It starts by calling +// v.Visit(node); node must not be nil. If the visitor w returned by +// v.Visit(node) is not nil, walkToReplace is invoked recursively with visitor +// w for each of the non-nil children of node, followed by a call of +// w.Visit(nil). 
+func WalkReplace(v Visitor, node ast.Node) (replacement ast.Node) { + walkToReplace(v, node, func(r ast.Node) { + replacement = r + }) + return +} + +func walkToReplace(v Visitor, node ast.Node, replace func(ast.Node)) { + if v == nil { + return + } + var replacement ast.Node + repl := func(r ast.Node) { + replacement = r + replace(r) + } + + v = v.Visit(node, repl) + + if replacement != nil { + return + } + + // walk children + // (the order of the cases matches the order + // of the corresponding node types in ast.go) + switch n := node.(type) { + + // These are all leaves, so there's no sub-walk to do. + // We just need to replace them on their parent with a copy. + case *ast.Comment: + cpy := *n + replace(&cpy) + case *ast.BadExpr: + cpy := *n + replace(&cpy) + case *ast.Ident: + cpy := *n + replace(&cpy) + case *ast.BasicLit: + cpy := *n + replace(&cpy) + case *ast.BadDecl: + cpy := *n + replace(&cpy) + case *ast.EmptyStmt: + cpy := *n + replace(&cpy) + case *ast.BadStmt: + cpy := *n + replace(&cpy) + + case *ast.CommentGroup: + cpy := *n + + if n.List != nil { + cpy.List = make([]*ast.Comment, len(n.List)) + copy(cpy.List, n.List) + } + + for i, c := range cpy.List { + walkToReplace(v, c, func(r ast.Node) { + cpy.List[i] = r.(*ast.Comment) + }) + } + replace(&cpy) + + case *ast.Field: + cpy := *n + if n.Names != nil { + cpy.Names = make([]*ast.Ident, len(n.Names)) + copy(cpy.Names, n.Names) + } + + if cpy.Doc != nil { + walkToReplace(v, cpy.Doc, func(r ast.Node) { + cpy.Doc = r.(*ast.CommentGroup) + }) + } + walkIdentList(v, cpy.Names) + + walkToReplace(v, cpy.Type, func(r ast.Node) { + cpy.Type = r.(ast.Expr) + }) + if cpy.Tag != nil { + walkToReplace(v, cpy.Tag, func(r ast.Node) { + cpy.Tag = r.(*ast.BasicLit) + }) + } + if cpy.Comment != nil { + walkToReplace(v, cpy.Comment, func(r ast.Node) { + cpy.Comment = r.(*ast.CommentGroup) + }) + } + replace(&cpy) + + case *ast.FieldList: + cpy := *n + if n.List != nil { + cpy.List = make([]*ast.Field, len(n.List)) + copy(cpy.List, n.List) + } + + for i, f := range cpy.List { + walkToReplace(v, f, func(r ast.Node) { + cpy.List[i] = r.(*ast.Field) + }) + } + + replace(&cpy) + + case *ast.Ellipsis: + cpy := *n + + if cpy.Elt != nil { + walkToReplace(v, cpy.Elt, func(r ast.Node) { + cpy.Elt = r.(ast.Expr) + }) + } + + replace(&cpy) + + case *ast.FuncLit: + cpy := *n + walkToReplace(v, cpy.Type, func(r ast.Node) { + cpy.Type = r.(*ast.FuncType) + }) + walkToReplace(v, cpy.Body, func(r ast.Node) { + cpy.Body = r.(*ast.BlockStmt) + }) + + replace(&cpy) + case *ast.CompositeLit: + cpy := *n + if n.Elts != nil { + cpy.Elts = make([]ast.Expr, len(n.Elts)) + copy(cpy.Elts, n.Elts) + } + + if cpy.Type != nil { + walkToReplace(v, cpy.Type, func(r ast.Node) { + cpy.Type = r.(ast.Expr) + }) + } + walkExprList(v, cpy.Elts) + + replace(&cpy) + case *ast.ParenExpr: + cpy := *n + walkToReplace(v, cpy.X, func(r ast.Node) { + cpy.X = r.(ast.Expr) + }) + + replace(&cpy) + case *ast.SelectorExpr: + cpy := *n + walkToReplace(v, cpy.X, func(r ast.Node) { + cpy.X = r.(ast.Expr) + }) + walkToReplace(v, cpy.Sel, func(r ast.Node) { + cpy.Sel = r.(*ast.Ident) + }) + + replace(&cpy) + case *ast.IndexExpr: + cpy := *n + walkToReplace(v, cpy.X, func(r ast.Node) { + cpy.X = r.(ast.Expr) + }) + walkToReplace(v, cpy.Index, func(r ast.Node) { + cpy.Index = r.(ast.Expr) + }) + + replace(&cpy) + case *ast.SliceExpr: + cpy := *n + walkToReplace(v, cpy.X, func(r ast.Node) { + cpy.X = r.(ast.Expr) + }) + if cpy.Low != nil { + walkToReplace(v, cpy.Low, func(r ast.Node) { + 
cpy.Low = r.(ast.Expr) + }) + } + if cpy.High != nil { + walkToReplace(v, cpy.High, func(r ast.Node) { + cpy.High = r.(ast.Expr) + }) + } + if cpy.Max != nil { + walkToReplace(v, cpy.Max, func(r ast.Node) { + cpy.Max = r.(ast.Expr) + }) + } + + replace(&cpy) + case *ast.TypeAssertExpr: + cpy := *n + walkToReplace(v, cpy.X, func(r ast.Node) { + cpy.X = r.(ast.Expr) + }) + if cpy.Type != nil { + walkToReplace(v, cpy.Type, func(r ast.Node) { + cpy.Type = r.(ast.Expr) + }) + } + replace(&cpy) + case *ast.CallExpr: + cpy := *n + if n.Args != nil { + cpy.Args = make([]ast.Expr, len(n.Args)) + copy(cpy.Args, n.Args) + } + + walkToReplace(v, cpy.Fun, func(r ast.Node) { + cpy.Fun = r.(ast.Expr) + }) + walkExprList(v, cpy.Args) + + replace(&cpy) + case *ast.StarExpr: + cpy := *n + walkToReplace(v, cpy.X, func(r ast.Node) { + cpy.X = r.(ast.Expr) + }) + + replace(&cpy) + case *ast.UnaryExpr: + cpy := *n + walkToReplace(v, cpy.X, func(r ast.Node) { + cpy.X = r.(ast.Expr) + }) + + replace(&cpy) + case *ast.BinaryExpr: + cpy := *n + walkToReplace(v, cpy.X, func(r ast.Node) { + cpy.X = r.(ast.Expr) + }) + walkToReplace(v, cpy.Y, func(r ast.Node) { + cpy.Y = r.(ast.Expr) + }) + + replace(&cpy) + case *ast.KeyValueExpr: + cpy := *n + walkToReplace(v, cpy.Key, func(r ast.Node) { + cpy.Key = r.(ast.Expr) + }) + walkToReplace(v, cpy.Value, func(r ast.Node) { + cpy.Value = r.(ast.Expr) + }) + + replace(&cpy) + + // Types + case *ast.ArrayType: + cpy := *n + if cpy.Len != nil { + walkToReplace(v, cpy.Len, func(r ast.Node) { + cpy.Len = r.(ast.Expr) + }) + } + walkToReplace(v, cpy.Elt, func(r ast.Node) { + cpy.Elt = r.(ast.Expr) + }) + + replace(&cpy) + case *ast.StructType: + cpy := *n + walkToReplace(v, cpy.Fields, func(r ast.Node) { + cpy.Fields = r.(*ast.FieldList) + }) + + replace(&cpy) + case *ast.FuncType: + cpy := *n + if cpy.Params != nil { + walkToReplace(v, cpy.Params, func(r ast.Node) { + cpy.Params = r.(*ast.FieldList) + }) + } + if cpy.Results != nil { + walkToReplace(v, cpy.Results, func(r ast.Node) { + cpy.Results = r.(*ast.FieldList) + }) + } + + replace(&cpy) + case *ast.InterfaceType: + cpy := *n + walkToReplace(v, cpy.Methods, func(r ast.Node) { + cpy.Methods = r.(*ast.FieldList) + }) + + replace(&cpy) + case *ast.MapType: + cpy := *n + walkToReplace(v, cpy.Key, func(r ast.Node) { + cpy.Key = r.(ast.Expr) + }) + walkToReplace(v, cpy.Value, func(r ast.Node) { + cpy.Value = r.(ast.Expr) + }) + + replace(&cpy) + case *ast.ChanType: + cpy := *n + walkToReplace(v, cpy.Value, func(r ast.Node) { + cpy.Value = r.(ast.Expr) + }) + + replace(&cpy) + case *ast.DeclStmt: + cpy := *n + walkToReplace(v, cpy.Decl, func(r ast.Node) { + cpy.Decl = r.(ast.Decl) + }) + + replace(&cpy) + case *ast.LabeledStmt: + cpy := *n + walkToReplace(v, cpy.Label, func(r ast.Node) { + cpy.Label = r.(*ast.Ident) + }) + walkToReplace(v, cpy.Stmt, func(r ast.Node) { + cpy.Stmt = r.(ast.Stmt) + }) + + replace(&cpy) + case *ast.ExprStmt: + cpy := *n + walkToReplace(v, cpy.X, func(r ast.Node) { + cpy.X = r.(ast.Expr) + }) + + replace(&cpy) + case *ast.SendStmt: + cpy := *n + walkToReplace(v, cpy.Chan, func(r ast.Node) { + cpy.Chan = r.(ast.Expr) + }) + walkToReplace(v, cpy.Value, func(r ast.Node) { + cpy.Value = r.(ast.Expr) + }) + + replace(&cpy) + case *ast.IncDecStmt: + cpy := *n + walkToReplace(v, cpy.X, func(r ast.Node) { + cpy.X = r.(ast.Expr) + }) + + replace(&cpy) + case *ast.AssignStmt: + cpy := *n + if n.Lhs != nil { + cpy.Lhs = make([]ast.Expr, len(n.Lhs)) + copy(cpy.Lhs, n.Lhs) + } + if n.Rhs != nil { + cpy.Rhs = 
make([]ast.Expr, len(n.Rhs)) + copy(cpy.Rhs, n.Rhs) + } + + walkExprList(v, cpy.Lhs) + walkExprList(v, cpy.Rhs) + + replace(&cpy) + case *ast.GoStmt: + cpy := *n + walkToReplace(v, cpy.Call, func(r ast.Node) { + cpy.Call = r.(*ast.CallExpr) + }) + + replace(&cpy) + case *ast.DeferStmt: + cpy := *n + walkToReplace(v, cpy.Call, func(r ast.Node) { + cpy.Call = r.(*ast.CallExpr) + }) + + replace(&cpy) + case *ast.ReturnStmt: + cpy := *n + if n.Results != nil { + cpy.Results = make([]ast.Expr, len(n.Results)) + copy(cpy.Results, n.Results) + } + + walkExprList(v, cpy.Results) + + replace(&cpy) + case *ast.BranchStmt: + cpy := *n + if cpy.Label != nil { + walkToReplace(v, cpy.Label, func(r ast.Node) { + cpy.Label = r.(*ast.Ident) + }) + } + + replace(&cpy) + case *ast.BlockStmt: + cpy := *n + if n.List != nil { + cpy.List = make([]ast.Stmt, len(n.List)) + copy(cpy.List, n.List) + } + + walkStmtList(v, cpy.List) + + replace(&cpy) + case *ast.IfStmt: + cpy := *n + + if cpy.Init != nil { + walkToReplace(v, cpy.Init, func(r ast.Node) { + cpy.Init = r.(ast.Stmt) + }) + } + walkToReplace(v, cpy.Cond, func(r ast.Node) { + cpy.Cond = r.(ast.Expr) + }) + walkToReplace(v, cpy.Body, func(r ast.Node) { + cpy.Body = r.(*ast.BlockStmt) + }) + if cpy.Else != nil { + walkToReplace(v, cpy.Else, func(r ast.Node) { + cpy.Else = r.(ast.Stmt) + }) + } + + replace(&cpy) + case *ast.CaseClause: + cpy := *n + if n.List != nil { + cpy.List = make([]ast.Expr, len(n.List)) + copy(cpy.List, n.List) + } + if n.Body != nil { + cpy.Body = make([]ast.Stmt, len(n.Body)) + copy(cpy.Body, n.Body) + } + + walkExprList(v, cpy.List) + walkStmtList(v, cpy.Body) + + replace(&cpy) + case *ast.SwitchStmt: + cpy := *n + if cpy.Init != nil { + walkToReplace(v, cpy.Init, func(r ast.Node) { + cpy.Init = r.(ast.Stmt) + }) + } + if cpy.Tag != nil { + walkToReplace(v, cpy.Tag, func(r ast.Node) { + cpy.Tag = r.(ast.Expr) + }) + } + walkToReplace(v, cpy.Body, func(r ast.Node) { + cpy.Body = r.(*ast.BlockStmt) + }) + + replace(&cpy) + case *ast.TypeSwitchStmt: + cpy := *n + if cpy.Init != nil { + walkToReplace(v, cpy.Init, func(r ast.Node) { + cpy.Init = r.(ast.Stmt) + }) + } + walkToReplace(v, cpy.Assign, func(r ast.Node) { + cpy.Assign = r.(ast.Stmt) + }) + walkToReplace(v, cpy.Body, func(r ast.Node) { + cpy.Body = r.(*ast.BlockStmt) + }) + + replace(&cpy) + case *ast.CommClause: + cpy := *n + if n.Body != nil { + cpy.Body = make([]ast.Stmt, len(n.Body)) + copy(cpy.Body, n.Body) + } + + if cpy.Comm != nil { + walkToReplace(v, cpy.Comm, func(r ast.Node) { + cpy.Comm = r.(ast.Stmt) + }) + } + walkStmtList(v, cpy.Body) + + replace(&cpy) + case *ast.SelectStmt: + cpy := *n + walkToReplace(v, cpy.Body, func(r ast.Node) { + cpy.Body = r.(*ast.BlockStmt) + }) + + replace(&cpy) + case *ast.ForStmt: + cpy := *n + if cpy.Init != nil { + walkToReplace(v, cpy.Init, func(r ast.Node) { + cpy.Init = r.(ast.Stmt) + }) + } + if cpy.Cond != nil { + walkToReplace(v, cpy.Cond, func(r ast.Node) { + cpy.Cond = r.(ast.Expr) + }) + } + if cpy.Post != nil { + walkToReplace(v, cpy.Post, func(r ast.Node) { + cpy.Post = r.(ast.Stmt) + }) + } + walkToReplace(v, cpy.Body, func(r ast.Node) { + cpy.Body = r.(*ast.BlockStmt) + }) + + replace(&cpy) + case *ast.RangeStmt: + cpy := *n + if cpy.Key != nil { + walkToReplace(v, cpy.Key, func(r ast.Node) { + cpy.Key = r.(ast.Expr) + }) + } + if cpy.Value != nil { + walkToReplace(v, cpy.Value, func(r ast.Node) { + cpy.Value = r.(ast.Expr) + }) + } + walkToReplace(v, cpy.X, func(r ast.Node) { + cpy.X = r.(ast.Expr) + }) + 
walkToReplace(v, cpy.Body, func(r ast.Node) { + cpy.Body = r.(*ast.BlockStmt) + }) + + // Declarations + replace(&cpy) + case *ast.ImportSpec: + cpy := *n + if cpy.Doc != nil { + walkToReplace(v, cpy.Doc, func(r ast.Node) { + cpy.Doc = r.(*ast.CommentGroup) + }) + } + if cpy.Name != nil { + walkToReplace(v, cpy.Name, func(r ast.Node) { + cpy.Name = r.(*ast.Ident) + }) + } + walkToReplace(v, cpy.Path, func(r ast.Node) { + cpy.Path = r.(*ast.BasicLit) + }) + if cpy.Comment != nil { + walkToReplace(v, cpy.Comment, func(r ast.Node) { + cpy.Comment = r.(*ast.CommentGroup) + }) + } + + replace(&cpy) + case *ast.ValueSpec: + cpy := *n + if n.Names != nil { + cpy.Names = make([]*ast.Ident, len(n.Names)) + copy(cpy.Names, n.Names) + } + if n.Values != nil { + cpy.Values = make([]ast.Expr, len(n.Values)) + copy(cpy.Values, n.Values) + } + + if cpy.Doc != nil { + walkToReplace(v, cpy.Doc, func(r ast.Node) { + cpy.Doc = r.(*ast.CommentGroup) + }) + } + + walkIdentList(v, cpy.Names) + + if cpy.Type != nil { + walkToReplace(v, cpy.Type, func(r ast.Node) { + cpy.Type = r.(ast.Expr) + }) + } + + walkExprList(v, cpy.Values) + + if cpy.Comment != nil { + walkToReplace(v, cpy.Comment, func(r ast.Node) { + cpy.Comment = r.(*ast.CommentGroup) + }) + } + + replace(&cpy) + + case *ast.TypeSpec: + cpy := *n + + if cpy.Doc != nil { + walkToReplace(v, cpy.Doc, func(r ast.Node) { + cpy.Doc = r.(*ast.CommentGroup) + }) + } + walkToReplace(v, cpy.Name, func(r ast.Node) { + cpy.Name = r.(*ast.Ident) + }) + walkToReplace(v, cpy.Type, func(r ast.Node) { + cpy.Type = r.(ast.Expr) + }) + if cpy.Comment != nil { + walkToReplace(v, cpy.Comment, func(r ast.Node) { + cpy.Comment = r.(*ast.CommentGroup) + }) + } + + replace(&cpy) + case *ast.GenDecl: + cpy := *n + if n.Specs != nil { + cpy.Specs = make([]ast.Spec, len(n.Specs)) + copy(cpy.Specs, n.Specs) + } + + if cpy.Doc != nil { + walkToReplace(v, cpy.Doc, func(r ast.Node) { + cpy.Doc = r.(*ast.CommentGroup) + }) + } + for i, s := range cpy.Specs { + walkToReplace(v, s, func(r ast.Node) { + cpy.Specs[i] = r.(ast.Spec) + }) + } + + replace(&cpy) + case *ast.FuncDecl: + cpy := *n + + if cpy.Doc != nil { + walkToReplace(v, cpy.Doc, func(r ast.Node) { + cpy.Doc = r.(*ast.CommentGroup) + }) + } + if cpy.Recv != nil { + walkToReplace(v, cpy.Recv, func(r ast.Node) { + cpy.Recv = r.(*ast.FieldList) + }) + } + walkToReplace(v, cpy.Name, func(r ast.Node) { + cpy.Name = r.(*ast.Ident) + }) + walkToReplace(v, cpy.Type, func(r ast.Node) { + cpy.Type = r.(*ast.FuncType) + }) + if cpy.Body != nil { + walkToReplace(v, cpy.Body, func(r ast.Node) { + cpy.Body = r.(*ast.BlockStmt) + }) + } + + // Files and packages + replace(&cpy) + case *ast.File: + cpy := *n + + if cpy.Doc != nil { + walkToReplace(v, cpy.Doc, func(r ast.Node) { + cpy.Doc = r.(*ast.CommentGroup) + }) + } + walkToReplace(v, cpy.Name, func(r ast.Node) { + cpy.Name = r.(*ast.Ident) + }) + walkDeclList(v, cpy.Decls) + // don't walk cpy.Comments - they have been + // visited already through the individual + // nodes + + replace(&cpy) + case *ast.Package: + cpy := *n + cpy.Files = map[string]*ast.File{} + + for i, f := range n.Files { + cpy.Files[i] = f + walkToReplace(v, f, func(r ast.Node) { + cpy.Files[i] = r.(*ast.File) + }) + } + replace(&cpy) + + default: + panic(fmt.Sprintf("walkToReplace: unexpected node type %T", n)) + } + + if v != nil { + v.Visit(nil, func(ast.Node) { panic("can't replace the go-up nil") }) + } +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/sourcecontext.go 
b/vendor/github.com/go-kit/kit/cmd/kitgen/sourcecontext.go new file mode 100644 index 000000000..35933a20f --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/sourcecontext.go @@ -0,0 +1,53 @@ +package main + +import ( + "fmt" + "go/ast" + "go/token" +) + +type sourceContext struct { + pkg *ast.Ident + imports []*ast.ImportSpec + interfaces []iface + types []*ast.TypeSpec +} + +func (sc *sourceContext) validate() error { + if len(sc.interfaces) != 1 { + return fmt.Errorf("found %d interfaces, expecting exactly 1", len(sc.interfaces)) + } + for _, i := range sc.interfaces { + for _, m := range i.methods { + if len(m.results) < 1 { + return fmt.Errorf("method %q of interface %q has no result types", m.name, i.name) + } + } + } + return nil +} + +func (sc *sourceContext) importDecls() (decls []ast.Decl) { + have := map[string]struct{}{} + notHave := func(is *ast.ImportSpec) bool { + if _, has := have[is.Path.Value]; has { + return false + } + have[is.Path.Value] = struct{}{} + return true + } + + for _, is := range sc.imports { + if notHave(is) { + decls = append(decls, importFor(is)) + } + } + + for _, is := range fetchImports() { + if notHave(is) { + decls = append(decls, &ast.GenDecl{Tok: token.IMPORT, Specs: []ast.Spec{is}}) + } + } + + return +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/templates/full.go b/vendor/github.com/go-kit/kit/cmd/kitgen/templates/full.go new file mode 100644 index 000000000..c3516856a --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/templates/full.go @@ -0,0 +1,60 @@ +package foo + +import ( + "context" + "encoding/json" + "errors" + "net/http" + + "github.com/go-kit/kit/endpoint" + httptransport "github.com/go-kit/kit/transport/http" +) + +type ExampleService struct { +} + +type ExampleRequest struct { + I int + S string +} +type ExampleResponse struct { + S string + Err error +} + +type Endpoints struct { + ExampleEndpoint endpoint.Endpoint +} + +func (f ExampleService) ExampleEndpoint(ctx context.Context, i int, s string) (string, error) { + panic(errors.New("not implemented")) +} + +func makeExampleEndpoint(f ExampleService) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(ExampleRequest) + s, err := f.ExampleEndpoint(ctx, req.I, req.S) + return ExampleResponse{S: s, Err: err}, nil + } +} + +func inlineHandlerBuilder(m *http.ServeMux, endpoints Endpoints) { + m.Handle("/bar", httptransport.NewServer(endpoints.ExampleEndpoint, DecodeExampleRequest, EncodeExampleResponse)) +} + +func NewHTTPHandler(endpoints Endpoints) http.Handler { + m := http.NewServeMux() + inlineHandlerBuilder(m, endpoints) + return m +} + +func DecodeExampleRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req ExampleRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} + +func EncodeExampleResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/default/endpoints/endpoints.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/default/endpoints/endpoints.go new file mode 100644 index 000000000..1bc722ccf --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/default/endpoints/endpoints.go @@ -0,0 +1,28 @@ +package endpoints + +import "context" + +import "github.com/go-kit/kit/endpoint" + +import 
"github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/default/service" + +type FooRequest struct { + I int + S string +} +type FooResponse struct { + I int + Err error +} + +func MakeFooEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(FooRequest) + i, err := s.Foo(ctx, req.I, req.S) + return FooResponse{I: i, Err: err}, nil + } +} + +type Endpoints struct { + Foo endpoint.Endpoint +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/default/http/http.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/default/http/http.go new file mode 100644 index 000000000..e02944084 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/default/http/http.go @@ -0,0 +1,24 @@ +package http + +import "context" +import "encoding/json" + +import "net/http" + +import httptransport "github.com/go-kit/kit/transport/http" +import "github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/default/endpoints" + +func NewHTTPHandler(endpoints endpoints.Endpoints) http.Handler { + m := http.NewServeMux() + m.Handle("/foo", httptransport.NewServer(endpoints.Foo, DecodeFooRequest, EncodeFooResponse)) + return m +} +func DecodeFooRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.FooRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeFooResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/default/service/service.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/default/service/service.go new file mode 100644 index 000000000..8adbd5a14 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/default/service/service.go @@ -0,0 +1,12 @@ +package service + +import "context" + +import "errors" + +type Service struct { +} + +func (s Service) Foo(ctx context.Context, i int, string1 string) (int, error) { + panic(errors.New("not implemented")) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/flat/gokit.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/flat/gokit.go new file mode 100644 index 000000000..068cb76ee --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/flat/gokit.go @@ -0,0 +1,51 @@ +package foo + +import "context" +import "encoding/json" +import "errors" +import "net/http" +import "github.com/go-kit/kit/endpoint" +import httptransport "github.com/go-kit/kit/transport/http" + +type Service struct { +} + +func (s Service) Foo(ctx context.Context, i int, string1 string) (int, error) { + panic(errors.New("not implemented")) +} + +type FooRequest struct { + I int + S string +} +type FooResponse struct { + I int + Err error +} + +func MakeFooEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(FooRequest) + i, err := s.Foo(ctx, req.I, req.S) + return FooResponse{I: i, Err: err}, nil + } +} + +type Endpoints struct { + Foo endpoint.Endpoint +} + +func NewHTTPHandler(endpoints Endpoints) http.Handler { + m := http.NewServeMux() + m.Handle("/foo", httptransport.NewServer(endpoints.Foo, DecodeFooRequest, EncodeFooResponse)) + return m +} +func DecodeFooRequest(_ context.Context, r *http.Request) 
(interface{}, error) { + var req FooRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeFooResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/in.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/in.go new file mode 100644 index 000000000..c0c87d808 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/anonfields/in.go @@ -0,0 +1,6 @@ +package foo + +// from https://github.com/go-kit/kit/pull/589#issuecomment-319937530 +type Service interface { + Foo(context.Context, int, string) (int, error) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/default/endpoints/endpoints.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/default/endpoints/endpoints.go new file mode 100644 index 000000000..5c654a12e --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/default/endpoints/endpoints.go @@ -0,0 +1,28 @@ +package endpoints + +import "context" + +import "github.com/go-kit/kit/endpoint" + +import "github.com/go-kit/kit/cmd/kitgen/testdata/foo/default/service" + +type BarRequest struct { + I int + S string +} +type BarResponse struct { + S string + Err error +} + +func MakeBarEndpoint(f service.FooService) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(BarRequest) + s, err := f.Bar(ctx, req.I, req.S) + return BarResponse{S: s, Err: err}, nil + } +} + +type Endpoints struct { + Bar endpoint.Endpoint +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/default/http/http.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/default/http/http.go new file mode 100644 index 000000000..286926306 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/default/http/http.go @@ -0,0 +1,24 @@ +package http + +import "context" +import "encoding/json" + +import "net/http" + +import httptransport "github.com/go-kit/kit/transport/http" +import "github.com/go-kit/kit/cmd/kitgen/testdata/foo/default/endpoints" + +func NewHTTPHandler(endpoints endpoints.Endpoints) http.Handler { + m := http.NewServeMux() + m.Handle("/bar", httptransport.NewServer(endpoints.Bar, DecodeBarRequest, EncodeBarResponse)) + return m +} +func DecodeBarRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.BarRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeBarResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/default/service/service.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/default/service/service.go new file mode 100644 index 000000000..02a7babea --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/default/service/service.go @@ -0,0 +1,12 @@ +package service + +import "context" + +import "errors" + +type FooService struct { +} + +func (f FooService) Bar(ctx context.Context, i int, s string) (string, error) { + panic(errors.New("not implemented")) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/flat/gokit.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/flat/gokit.go new file 
mode 100644 index 000000000..35c89f113 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/flat/gokit.go @@ -0,0 +1,51 @@ +package foo + +import "context" +import "encoding/json" +import "errors" +import "net/http" +import "github.com/go-kit/kit/endpoint" +import httptransport "github.com/go-kit/kit/transport/http" + +type FooService struct { +} + +func (f FooService) Bar(ctx context.Context, i int, s string) (string, error) { + panic(errors.New("not implemented")) +} + +type BarRequest struct { + I int + S string +} +type BarResponse struct { + S string + Err error +} + +func MakeBarEndpoint(f FooService) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(BarRequest) + s, err := f.Bar(ctx, req.I, req.S) + return BarResponse{S: s, Err: err}, nil + } +} + +type Endpoints struct { + Bar endpoint.Endpoint +} + +func NewHTTPHandler(endpoints Endpoints) http.Handler { + m := http.NewServeMux() + m.Handle("/bar", httptransport.NewServer(endpoints.Bar, DecodeBarRequest, EncodeBarResponse)) + return m +} +func DecodeBarRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req BarRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeBarResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/in.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/in.go new file mode 100644 index 000000000..1c01933fa --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/foo/in.go @@ -0,0 +1,5 @@ +package foo + +type FooService interface { + Bar(ctx context.Context, i int, s string) (string, error) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/default/endpoints/endpoints.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/default/endpoints/endpoints.go new file mode 100644 index 000000000..dedd04140 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/default/endpoints/endpoints.go @@ -0,0 +1,162 @@ +package endpoints + +import "context" + +import "github.com/go-kit/kit/endpoint" + +import "github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/default/service" + +type PostProfileRequest struct { + P service.Profile +} +type PostProfileResponse struct { + Err error +} + +func MakePostProfileEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(PostProfileRequest) + err := s.PostProfile(ctx, req.P) + return PostProfileResponse{Err: err}, nil + } +} + +type GetProfileRequest struct { + Id string +} +type GetProfileResponse struct { + P service.Profile + Err error +} + +func MakeGetProfileEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(GetProfileRequest) + P, err := s.GetProfile(ctx, req.Id) + return GetProfileResponse{P: P, Err: err}, nil + } +} + +type PutProfileRequest struct { + Id string + P service.Profile +} +type PutProfileResponse struct { + Err error +} + +func MakePutProfileEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(PutProfileRequest) + err := s.PutProfile(ctx, req.Id, req.P) + return 
PutProfileResponse{Err: err}, nil + } +} + +type PatchProfileRequest struct { + Id string + P service.Profile +} +type PatchProfileResponse struct { + Err error +} + +func MakePatchProfileEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(PatchProfileRequest) + err := s.PatchProfile(ctx, req.Id, req.P) + return PatchProfileResponse{Err: err}, nil + } +} + +type DeleteProfileRequest struct { + Id string +} +type DeleteProfileResponse struct { + Err error +} + +func MakeDeleteProfileEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(DeleteProfileRequest) + err := s.DeleteProfile(ctx, req.Id) + return DeleteProfileResponse{Err: err}, nil + } +} + +type GetAddressesRequest struct { + ProfileID string +} +type GetAddressesResponse struct { + S []service.Address + Err error +} + +func MakeGetAddressesEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(GetAddressesRequest) + slice1, err := s.GetAddresses(ctx, req.ProfileID) + return GetAddressesResponse{S: slice1, Err: err}, nil + } +} + +type GetAddressRequest struct { + ProfileID string + AddressID string +} +type GetAddressResponse struct { + A service.Address + Err error +} + +func MakeGetAddressEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(GetAddressRequest) + A, err := s.GetAddress(ctx, req.ProfileID, req.AddressID) + return GetAddressResponse{A: A, Err: err}, nil + } +} + +type PostAddressRequest struct { + ProfileID string + A service.Address +} +type PostAddressResponse struct { + Err error +} + +func MakePostAddressEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(PostAddressRequest) + err := s.PostAddress(ctx, req.ProfileID, req.A) + return PostAddressResponse{Err: err}, nil + } +} + +type DeleteAddressRequest struct { + ProfileID string + AddressID string +} +type DeleteAddressResponse struct { + Err error +} + +func MakeDeleteAddressEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(DeleteAddressRequest) + err := s.DeleteAddress(ctx, req.ProfileID, req.AddressID) + return DeleteAddressResponse{Err: err}, nil + } +} + +type Endpoints struct { + PostProfile endpoint.Endpoint + GetProfile endpoint.Endpoint + PutProfile endpoint.Endpoint + PatchProfile endpoint.Endpoint + DeleteProfile endpoint.Endpoint + GetAddresses endpoint.Endpoint + GetAddress endpoint.Endpoint + PostAddress endpoint.Endpoint + DeleteAddress endpoint.Endpoint +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/default/http/http.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/default/http/http.go new file mode 100644 index 000000000..b59f762b8 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/default/http/http.go @@ -0,0 +1,104 @@ +package http + +import "context" +import "encoding/json" + +import "net/http" + +import httptransport "github.com/go-kit/kit/transport/http" +import "github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/default/endpoints" + +func NewHTTPHandler(endpoints endpoints.Endpoints) http.Handler { + m := 
http.NewServeMux() + m.Handle("/postprofile", httptransport.NewServer(endpoints.PostProfile, DecodePostProfileRequest, EncodePostProfileResponse)) + m.Handle("/getprofile", httptransport.NewServer(endpoints.GetProfile, DecodeGetProfileRequest, EncodeGetProfileResponse)) + m.Handle("/putprofile", httptransport.NewServer(endpoints.PutProfile, DecodePutProfileRequest, EncodePutProfileResponse)) + m.Handle("/patchprofile", httptransport.NewServer(endpoints.PatchProfile, DecodePatchProfileRequest, EncodePatchProfileResponse)) + m.Handle("/deleteprofile", httptransport.NewServer(endpoints.DeleteProfile, DecodeDeleteProfileRequest, EncodeDeleteProfileResponse)) + m.Handle("/getaddresses", httptransport.NewServer(endpoints.GetAddresses, DecodeGetAddressesRequest, EncodeGetAddressesResponse)) + m.Handle("/getaddress", httptransport.NewServer(endpoints.GetAddress, DecodeGetAddressRequest, EncodeGetAddressResponse)) + m.Handle("/postaddress", httptransport.NewServer(endpoints.PostAddress, DecodePostAddressRequest, EncodePostAddressResponse)) + m.Handle("/deleteaddress", httptransport.NewServer(endpoints.DeleteAddress, DecodeDeleteAddressRequest, EncodeDeleteAddressResponse)) + return m +} +func DecodePostProfileRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.PostProfileRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodePostProfileResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodeGetProfileRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.GetProfileRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeGetProfileResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodePutProfileRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.PutProfileRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodePutProfileResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodePatchProfileRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.PatchProfileRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodePatchProfileResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodeDeleteProfileRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.DeleteProfileRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeDeleteProfileResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodeGetAddressesRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.GetAddressesRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeGetAddressesResponse(_ context.Context, w http.ResponseWriter, 
response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodeGetAddressRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.GetAddressRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeGetAddressResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodePostAddressRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.PostAddressRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodePostAddressResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodeDeleteAddressRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.DeleteAddressRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeDeleteAddressResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/default/service/service.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/default/service/service.go new file mode 100644 index 000000000..42d1b4d2e --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/default/service/service.go @@ -0,0 +1,45 @@ +package service + +import "context" + +import "errors" + +type Profile struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Addresses []Address `json:"addresses,omitempty"` +} +type Address struct { + ID string `json:"id"` + Location string `json:"location,omitempty"` +} +type Service struct { +} + +func (s Service) PostProfile(ctx context.Context, p Profile) error { + panic(errors.New("not implemented")) +} +func (s Service) GetProfile(ctx context.Context, id string) (Profile, error) { + panic(errors.New("not implemented")) +} +func (s Service) PutProfile(ctx context.Context, id string, p Profile) error { + panic(errors.New("not implemented")) +} +func (s Service) PatchProfile(ctx context.Context, id string, p Profile) error { + panic(errors.New("not implemented")) +} +func (s Service) DeleteProfile(ctx context.Context, id string) error { + panic(errors.New("not implemented")) +} +func (s Service) GetAddresses(ctx context.Context, profileID string) ([]Address, error) { + panic(errors.New("not implemented")) +} +func (s Service) GetAddress(ctx context.Context, profileID string, addressID string) (Address, error) { + panic(errors.New("not implemented")) +} +func (s Service) PostAddress(ctx context.Context, profileID string, a Address) error { + panic(errors.New("not implemented")) +} +func (s Service) DeleteAddress(ctx context.Context, profileID string, addressID string) error { + panic(errors.New("not implemented")) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/flat/gokit.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/flat/gokit.go new file mode 100644 index 000000000..e49e8614a --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/flat/gokit.go @@ -0,0 +1,298 @@ +package 
profilesvc + +import "context" +import "encoding/json" +import "errors" +import "net/http" +import "github.com/go-kit/kit/endpoint" +import httptransport "github.com/go-kit/kit/transport/http" + +type Profile struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Addresses []Address `json:"addresses,omitempty"` +} +type Address struct { + ID string `json:"id"` + Location string `json:"location,omitempty"` +} +type Service struct { +} + +func (s Service) PostProfile(ctx context.Context, p Profile) error { + panic(errors.New("not implemented")) +} + +type PostProfileRequest struct { + P Profile +} +type PostProfileResponse struct { + Err error +} + +func MakePostProfileEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(PostProfileRequest) + err := s.PostProfile(ctx, req.P) + return PostProfileResponse{Err: err}, nil + } +} +func (s Service) GetProfile(ctx context.Context, id string) (Profile, error) { + panic(errors.New("not implemented")) +} + +type GetProfileRequest struct { + Id string +} +type GetProfileResponse struct { + P Profile + Err error +} + +func MakeGetProfileEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(GetProfileRequest) + P, err := s.GetProfile(ctx, req.Id) + return GetProfileResponse{P: P, Err: err}, nil + } +} +func (s Service) PutProfile(ctx context.Context, id string, p Profile) error { + panic(errors.New("not implemented")) +} + +type PutProfileRequest struct { + Id string + P Profile +} +type PutProfileResponse struct { + Err error +} + +func MakePutProfileEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(PutProfileRequest) + err := s.PutProfile(ctx, req.Id, req.P) + return PutProfileResponse{Err: err}, nil + } +} +func (s Service) PatchProfile(ctx context.Context, id string, p Profile) error { + panic(errors.New("not implemented")) +} + +type PatchProfileRequest struct { + Id string + P Profile +} +type PatchProfileResponse struct { + Err error +} + +func MakePatchProfileEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(PatchProfileRequest) + err := s.PatchProfile(ctx, req.Id, req.P) + return PatchProfileResponse{Err: err}, nil + } +} +func (s Service) DeleteProfile(ctx context.Context, id string) error { + panic(errors.New("not implemented")) +} + +type DeleteProfileRequest struct { + Id string +} +type DeleteProfileResponse struct { + Err error +} + +func MakeDeleteProfileEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(DeleteProfileRequest) + err := s.DeleteProfile(ctx, req.Id) + return DeleteProfileResponse{Err: err}, nil + } +} +func (s Service) GetAddresses(ctx context.Context, profileID string) ([]Address, error) { + panic(errors.New("not implemented")) +} + +type GetAddressesRequest struct { + ProfileID string +} +type GetAddressesResponse struct { + S []Address + Err error +} + +func MakeGetAddressesEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(GetAddressesRequest) + slice1, err := s.GetAddresses(ctx, req.ProfileID) + return GetAddressesResponse{S: slice1, Err: err}, nil + } +} +func (s Service) GetAddress(ctx 
context.Context, profileID string, addressID string) (Address, error) { + panic(errors.New("not implemented")) +} + +type GetAddressRequest struct { + ProfileID string + AddressID string +} +type GetAddressResponse struct { + A Address + Err error +} + +func MakeGetAddressEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(GetAddressRequest) + A, err := s.GetAddress(ctx, req.ProfileID, req.AddressID) + return GetAddressResponse{A: A, Err: err}, nil + } +} +func (s Service) PostAddress(ctx context.Context, profileID string, a Address) error { + panic(errors.New("not implemented")) +} + +type PostAddressRequest struct { + ProfileID string + A Address +} +type PostAddressResponse struct { + Err error +} + +func MakePostAddressEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(PostAddressRequest) + err := s.PostAddress(ctx, req.ProfileID, req.A) + return PostAddressResponse{Err: err}, nil + } +} +func (s Service) DeleteAddress(ctx context.Context, profileID string, addressID string) error { + panic(errors.New("not implemented")) +} + +type DeleteAddressRequest struct { + ProfileID string + AddressID string +} +type DeleteAddressResponse struct { + Err error +} + +func MakeDeleteAddressEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(DeleteAddressRequest) + err := s.DeleteAddress(ctx, req.ProfileID, req.AddressID) + return DeleteAddressResponse{Err: err}, nil + } +} + +type Endpoints struct { + PostProfile endpoint.Endpoint + GetProfile endpoint.Endpoint + PutProfile endpoint.Endpoint + PatchProfile endpoint.Endpoint + DeleteProfile endpoint.Endpoint + GetAddresses endpoint.Endpoint + GetAddress endpoint.Endpoint + PostAddress endpoint.Endpoint + DeleteAddress endpoint.Endpoint +} + +func NewHTTPHandler(endpoints Endpoints) http.Handler { + m := http.NewServeMux() + m.Handle("/postprofile", httptransport.NewServer(endpoints.PostProfile, DecodePostProfileRequest, EncodePostProfileResponse)) + m.Handle("/getprofile", httptransport.NewServer(endpoints.GetProfile, DecodeGetProfileRequest, EncodeGetProfileResponse)) + m.Handle("/putprofile", httptransport.NewServer(endpoints.PutProfile, DecodePutProfileRequest, EncodePutProfileResponse)) + m.Handle("/patchprofile", httptransport.NewServer(endpoints.PatchProfile, DecodePatchProfileRequest, EncodePatchProfileResponse)) + m.Handle("/deleteprofile", httptransport.NewServer(endpoints.DeleteProfile, DecodeDeleteProfileRequest, EncodeDeleteProfileResponse)) + m.Handle("/getaddresses", httptransport.NewServer(endpoints.GetAddresses, DecodeGetAddressesRequest, EncodeGetAddressesResponse)) + m.Handle("/getaddress", httptransport.NewServer(endpoints.GetAddress, DecodeGetAddressRequest, EncodeGetAddressResponse)) + m.Handle("/postaddress", httptransport.NewServer(endpoints.PostAddress, DecodePostAddressRequest, EncodePostAddressResponse)) + m.Handle("/deleteaddress", httptransport.NewServer(endpoints.DeleteAddress, DecodeDeleteAddressRequest, EncodeDeleteAddressResponse)) + return m +} +func DecodePostProfileRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req PostProfileRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodePostProfileResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", 
"application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodeGetProfileRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req GetProfileRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeGetProfileResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodePutProfileRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req PutProfileRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodePutProfileResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodePatchProfileRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req PatchProfileRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodePatchProfileResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodeDeleteProfileRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req DeleteProfileRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeDeleteProfileResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodeGetAddressesRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req GetAddressesRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeGetAddressesResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodeGetAddressRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req GetAddressRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeGetAddressResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodePostAddressRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req PostAddressRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodePostAddressResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodeDeleteAddressRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req DeleteAddressRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeDeleteAddressResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/in.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/in.go new file mode 100644 index 000000000..208fed9b7 --- /dev/null +++ 
b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/profilesvc/in.go @@ -0,0 +1,24 @@ +package profilesvc + +type Service interface { + PostProfile(ctx context.Context, p Profile) error + GetProfile(ctx context.Context, id string) (Profile, error) + PutProfile(ctx context.Context, id string, p Profile) error + PatchProfile(ctx context.Context, id string, p Profile) error + DeleteProfile(ctx context.Context, id string) error + GetAddresses(ctx context.Context, profileID string) ([]Address, error) + GetAddress(ctx context.Context, profileID string, addressID string) (Address, error) + PostAddress(ctx context.Context, profileID string, a Address) error + DeleteAddress(ctx context.Context, profileID string, addressID string) error +} + +type Profile struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Addresses []Address `json:"addresses,omitempty"` +} + +type Address struct { + ID string `json:"id"` + Location string `json:"location,omitempty"` +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/default/endpoints/endpoints.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/default/endpoints/endpoints.go new file mode 100644 index 000000000..baeb95186 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/default/endpoints/endpoints.go @@ -0,0 +1,44 @@ +package endpoints + +import "context" + +import "github.com/go-kit/kit/endpoint" + +import "github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/default/service" + +type ConcatRequest struct { + A string + B string +} +type ConcatResponse struct { + S string + Err error +} + +func MakeConcatEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(ConcatRequest) + string1, err := s.Concat(ctx, req.A, req.B) + return ConcatResponse{S: string1, Err: err}, nil + } +} + +type CountRequest struct { + S string +} +type CountResponse struct { + Count int +} + +func MakeCountEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(CountRequest) + count := s.Count(ctx, req.S) + return CountResponse{Count: count}, nil + } +} + +type Endpoints struct { + Concat endpoint.Endpoint + Count endpoint.Endpoint +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/default/http/http.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/default/http/http.go new file mode 100644 index 000000000..31e2c1938 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/default/http/http.go @@ -0,0 +1,34 @@ +package http + +import "context" +import "encoding/json" + +import "net/http" + +import httptransport "github.com/go-kit/kit/transport/http" +import "github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/default/endpoints" + +func NewHTTPHandler(endpoints endpoints.Endpoints) http.Handler { + m := http.NewServeMux() + m.Handle("/concat", httptransport.NewServer(endpoints.Concat, DecodeConcatRequest, EncodeConcatResponse)) + m.Handle("/count", httptransport.NewServer(endpoints.Count, DecodeCountRequest, EncodeCountResponse)) + return m +} +func DecodeConcatRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.ConcatRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeConcatResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + 
w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodeCountRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.CountRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeCountResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/default/service/service.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/default/service/service.go new file mode 100644 index 000000000..ddf24f972 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/default/service/service.go @@ -0,0 +1,15 @@ +package service + +import "context" + +import "errors" + +type Service struct { +} + +func (s Service) Concat(ctx context.Context, a string, b string) (string, error) { + panic(errors.New("not implemented")) +} +func (s Service) Count(ctx context.Context, string1 string) int { + panic(errors.New("not implemented")) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/flat/gokit.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/flat/gokit.go new file mode 100644 index 000000000..476bb7a1b --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/flat/gokit.go @@ -0,0 +1,80 @@ +package foo + +import "context" +import "encoding/json" +import "errors" +import "net/http" +import "github.com/go-kit/kit/endpoint" +import httptransport "github.com/go-kit/kit/transport/http" + +type Service struct { +} + +func (s Service) Concat(ctx context.Context, a string, b string) (string, error) { + panic(errors.New("not implemented")) +} + +type ConcatRequest struct { + A string + B string +} +type ConcatResponse struct { + S string + Err error +} + +func MakeConcatEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(ConcatRequest) + string1, err := s.Concat(ctx, req.A, req.B) + return ConcatResponse{S: string1, Err: err}, nil + } +} +func (s Service) Count(ctx context.Context, string1 string) int { + panic(errors.New("not implemented")) +} + +type CountRequest struct { + S string +} +type CountResponse struct { + Count int +} + +func MakeCountEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(CountRequest) + count := s.Count(ctx, req.S) + return CountResponse{Count: count}, nil + } +} + +type Endpoints struct { + Concat endpoint.Endpoint + Count endpoint.Endpoint +} + +func NewHTTPHandler(endpoints Endpoints) http.Handler { + m := http.NewServeMux() + m.Handle("/concat", httptransport.NewServer(endpoints.Concat, DecodeConcatRequest, EncodeConcatResponse)) + m.Handle("/count", httptransport.NewServer(endpoints.Count, DecodeCountRequest, EncodeCountResponse)) + return m +} +func DecodeConcatRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req ConcatRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeConcatResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} +func DecodeCountRequest(_ context.Context, r 
*http.Request) (interface{}, error) { + var req CountRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeCountResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/in.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/in.go new file mode 100644 index 000000000..68f018752 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/stringservice/in.go @@ -0,0 +1,6 @@ +package foo + +type Service interface { + Concat(ctx context.Context, a, b string) (string, error) + Count(ctx context.Context, s string) (count int) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/default/endpoints/endpoints.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/default/endpoints/endpoints.go new file mode 100644 index 000000000..f90cc9b6d --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/default/endpoints/endpoints.go @@ -0,0 +1,27 @@ +package endpoints + +import "context" + +import "github.com/go-kit/kit/endpoint" + +import "github.com/go-kit/kit/cmd/kitgen/testdata/underscores/default/service" + +type FooRequest struct { + I int +} +type FooResponse struct { + I int + Err error +} + +func MakeFooEndpoint(s service.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(FooRequest) + i, err := s.Foo(ctx, req.I) + return FooResponse{I: i, Err: err}, nil + } +} + +type Endpoints struct { + Foo endpoint.Endpoint +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/default/http/http.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/default/http/http.go new file mode 100644 index 000000000..a2844f048 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/default/http/http.go @@ -0,0 +1,24 @@ +package http + +import "context" +import "encoding/json" + +import "net/http" + +import httptransport "github.com/go-kit/kit/transport/http" +import "github.com/go-kit/kit/cmd/kitgen/testdata/underscores/default/endpoints" + +func NewHTTPHandler(endpoints endpoints.Endpoints) http.Handler { + m := http.NewServeMux() + m.Handle("/foo", httptransport.NewServer(endpoints.Foo, DecodeFooRequest, EncodeFooResponse)) + return m +} +func DecodeFooRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req endpoints.FooRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeFooResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/default/service/service.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/default/service/service.go new file mode 100644 index 000000000..e249a490f --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/default/service/service.go @@ -0,0 +1,12 @@ +package service + +import "context" + +import "errors" + +type Service struct { +} + +func (s Service) Foo(ctx context.Context, i int) (int, error) { + panic(errors.New("not implemented")) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/flat/gokit.go 
b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/flat/gokit.go new file mode 100644 index 000000000..8abe85be8 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/flat/gokit.go @@ -0,0 +1,50 @@ +package underscores + +import "context" +import "encoding/json" +import "errors" +import "net/http" +import "github.com/go-kit/kit/endpoint" +import httptransport "github.com/go-kit/kit/transport/http" + +type Service struct { +} + +func (s Service) Foo(ctx context.Context, i int) (int, error) { + panic(errors.New("not implemented")) +} + +type FooRequest struct { + I int +} +type FooResponse struct { + I int + Err error +} + +func MakeFooEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(FooRequest) + i, err := s.Foo(ctx, req.I) + return FooResponse{I: i, Err: err}, nil + } +} + +type Endpoints struct { + Foo endpoint.Endpoint +} + +func NewHTTPHandler(endpoints Endpoints) http.Handler { + m := http.NewServeMux() + m.Handle("/foo", httptransport.NewServer(endpoints.Foo, DecodeFooRequest, EncodeFooResponse)) + return m +} +func DecodeFooRequest(_ context.Context, r *http.Request) (interface{}, error) { + var req FooRequest + err := json.NewDecoder(r.Body).Decode(&req) + return req, err +} +func EncodeFooResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/in.go b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/in.go new file mode 100644 index 000000000..9457ee060 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/testdata/underscores/in.go @@ -0,0 +1,7 @@ +package underscores + +import "context" + +type Service interface { + Foo(_ context.Context, _ int) (int, error) +} diff --git a/vendor/github.com/go-kit/kit/cmd/kitgen/transform.go b/vendor/github.com/go-kit/kit/cmd/kitgen/transform.go new file mode 100644 index 000000000..362398c92 --- /dev/null +++ b/vendor/github.com/go-kit/kit/cmd/kitgen/transform.go @@ -0,0 +1,230 @@ +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/token" + "io" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/davecgh/go-spew/spew" + "github.com/pkg/errors" + + "golang.org/x/tools/imports" +) + +type ( + files map[string]io.Reader + layout interface { + transformAST(ctx *sourceContext) (files, error) + } + outputTree map[string]*ast.File +) + +func (ot outputTree) addFile(path, pkgname string) *ast.File { + file := &ast.File{ + Name: id(pkgname), + Decls: []ast.Decl{}, + } + ot[path] = file + return file +} + +func getGopath() string { + gopath, set := os.LookupEnv("GOPATH") + if !set { + return filepath.Join(os.Getenv("HOME"), "go") + } + return gopath +} + +func importPath(targetDir, gopath string) (string, error) { + if !filepath.IsAbs(targetDir) { + return "", fmt.Errorf("%q is not an absolute path", targetDir) + } + + for _, dir := range filepath.SplitList(gopath) { + abspath, err := filepath.Abs(dir) + if err != nil { + continue + } + srcPath := filepath.Join(abspath, "src") + + res, err := filepath.Rel(srcPath, targetDir) + if err != nil { + continue + } + if strings.Index(res, "..") == -1 { + return res, nil + } + } + return "", fmt.Errorf("%q is not in GOPATH (%s)", targetDir, gopath) + +} + +func selectify(file *ast.File, pkgName, identName, importPath string) 
*ast.File { + if file.Name.Name == pkgName { + return file + } + + selector := sel(id(pkgName), id(identName)) + var did bool + if file, did = selectifyIdent(identName, file, selector); did { + addImport(file, importPath) + } + return file +} + +type selIdentFn func(ast.Node, func(ast.Node)) Visitor + +func (f selIdentFn) Visit(node ast.Node, r func(ast.Node)) Visitor { + return f(node, r) +} + +func selectifyIdent(identName string, file *ast.File, selector ast.Expr) (*ast.File, bool) { + var replaced bool + var r selIdentFn + r = selIdentFn(func(node ast.Node, replaceWith func(ast.Node)) Visitor { + switch id := node.(type) { + case *ast.SelectorExpr: + return nil + case *ast.Ident: + if id.Name == identName { + replaced = true + replaceWith(selector) + } + } + return r + }) + return WalkReplace(r, file).(*ast.File), replaced +} + +func formatNode(fname string, node ast.Node) (*bytes.Buffer, error) { + if file, is := node.(*ast.File); is { + sort.Stable(sortableDecls(file.Decls)) + } + outfset := token.NewFileSet() + buf := &bytes.Buffer{} + err := format.Node(buf, outfset, node) + if err != nil { + return nil, err + } + imps, err := imports.Process(fname, buf.Bytes(), nil) + if err != nil { + return nil, err + } + return bytes.NewBuffer(imps), nil +} + +type sortableDecls []ast.Decl + +func (sd sortableDecls) Len() int { + return len(sd) +} + +func (sd sortableDecls) Less(i int, j int) bool { + switch left := sd[i].(type) { + case *ast.GenDecl: + switch right := sd[j].(type) { + default: + return left.Tok == token.IMPORT + case *ast.GenDecl: + return left.Tok == token.IMPORT && right.Tok != token.IMPORT + } + } + return false +} + +func (sd sortableDecls) Swap(i int, j int) { + sd[i], sd[j] = sd[j], sd[i] +} + +func formatNodes(nodes outputTree) (files, error) { + res := files{} + var err error + for fn, node := range nodes { + res[fn], err = formatNode(fn, node) + if err != nil { + return nil, errors.Wrapf(err, "formatNodes") + } + } + return res, nil +} + +// XXX debug +func spewDecls(f *ast.File) { + for _, d := range f.Decls { + switch dcl := d.(type) { + default: + spew.Dump(dcl) + case *ast.GenDecl: + spew.Dump(dcl.Tok) + case *ast.FuncDecl: + spew.Dump(dcl.Name.Name) + } + } +} + +func addImports(root *ast.File, ctx *sourceContext) { + root.Decls = append(root.Decls, ctx.importDecls()...) +} + +func addImport(root *ast.File, path string) { + for _, d := range root.Decls { + if imp, is := d.(*ast.GenDecl); is && imp.Tok == token.IMPORT { + for _, s := range imp.Specs { + if s.(*ast.ImportSpec).Path.Value == `"`+path+`"` { + return // already have one + // xxx aliased imports? 
+ } + } + } + } + root.Decls = append(root.Decls, importFor(importSpec(path))) +} + +func addStubStruct(root *ast.File, iface iface) { + root.Decls = append(root.Decls, iface.stubStructDecl()) +} + +func addType(root *ast.File, typ *ast.TypeSpec) { + root.Decls = append(root.Decls, typeDecl(typ)) +} + +func addMethod(root *ast.File, iface iface, meth method) { + def := meth.definition(iface) + root.Decls = append(root.Decls, def) +} + +func addRequestStruct(root *ast.File, meth method) { + root.Decls = append(root.Decls, meth.requestStruct()) +} + +func addResponseStruct(root *ast.File, meth method) { + root.Decls = append(root.Decls, meth.responseStruct()) +} + +func addEndpointMaker(root *ast.File, ifc iface, meth method) { + root.Decls = append(root.Decls, meth.endpointMaker(ifc)) +} + +func addEndpointsStruct(root *ast.File, ifc iface) { + root.Decls = append(root.Decls, ifc.endpointsStruct()) +} + +func addHTTPHandler(root *ast.File, ifc iface) { + root.Decls = append(root.Decls, ifc.httpHandler()) +} + +func addDecoder(root *ast.File, meth method) { + root.Decls = append(root.Decls, meth.decoderFunc()) +} + +func addEncoder(root *ast.File, meth method) { + root.Decls = append(root.Decls, meth.encoderFunc()) +} diff --git a/vendor/github.com/go-kit/kit/coveralls.bash b/vendor/github.com/go-kit/kit/coveralls.bash new file mode 100755 index 000000000..cf8fee93b --- /dev/null +++ b/vendor/github.com/go-kit/kit/coveralls.bash @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +if ! type -P gover +then + echo gover missing: go get github.com/modocache/gover + exit 1 +fi + +if ! type -P goveralls +then + echo goveralls missing: go get github.com/mattn/goveralls + exit 1 +fi + +if [[ "$COVERALLS_TOKEN" == "" ]] +then + echo COVERALLS_TOKEN not set + exit 1 +fi + +go list ./... | grep -v '/examples/' | cut -d'/' -f 4- | while read d +do + cd $d + go test -covermode count -coverprofile coverage.coverprofile + cd - +done + +gover +goveralls -coverprofile gover.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN +find . -name '*.coverprofile' -delete + diff --git a/vendor/github.com/go-kit/kit/docker-compose-integration.yml b/vendor/github.com/go-kit/kit/docker-compose-integration.yml new file mode 100644 index 000000000..287d97db0 --- /dev/null +++ b/vendor/github.com/go-kit/kit/docker-compose-integration.yml @@ -0,0 +1,22 @@ +version: '2' +services: + etcd: + image: quay.io/coreos/etcd + ports: + - "2379:2379" + command: /usr/local/bin/etcd -advertise-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 -listen-client-urls "http://0.0.0.0:2379,http://0.0.0.0:4001" + consul: + image: progrium/consul + ports: + - "8500:8500" + command: -server -bootstrap + zk: + image: zookeeper + ports: + - "2181:2181" + eureka: + image: springcloud/eureka + environment: + eureka.server.responseCacheUpdateIntervalMs: 1000 + ports: + - "8761:8761" diff --git a/vendor/github.com/go-kit/kit/endpoint/doc.go b/vendor/github.com/go-kit/kit/endpoint/doc.go new file mode 100644 index 000000000..84e27b95d --- /dev/null +++ b/vendor/github.com/go-kit/kit/endpoint/doc.go @@ -0,0 +1,5 @@ +// Package endpoint defines an abstraction for RPCs. +// +// Endpoints are a fundamental building block for many Go kit components. +// Endpoints are implemented by servers, and called by clients. 
+package endpoint diff --git a/vendor/github.com/go-kit/kit/endpoint/endpoint.go b/vendor/github.com/go-kit/kit/endpoint/endpoint.go new file mode 100644 index 000000000..1b64f50ed --- /dev/null +++ b/vendor/github.com/go-kit/kit/endpoint/endpoint.go @@ -0,0 +1,28 @@ +package endpoint + +import ( + "context" +) + +// Endpoint is the fundamental building block of servers and clients. +// It represents a single RPC method. +type Endpoint func(ctx context.Context, request interface{}) (response interface{}, err error) + +// Nop is an endpoint that does nothing and returns a nil error. +// Useful for tests. +func Nop(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil } + +// Middleware is a chainable behavior modifier for endpoints. +type Middleware func(Endpoint) Endpoint + +// Chain is a helper function for composing middlewares. Requests will +// traverse them in the order they're declared. That is, the first middleware +// is treated as the outermost middleware. +func Chain(outer Middleware, others ...Middleware) Middleware { + return func(next Endpoint) Endpoint { + for i := len(others) - 1; i >= 0; i-- { // reverse + next = others[i](next) + } + return outer(next) + } +} diff --git a/vendor/github.com/go-kit/kit/endpoint/endpoint_example_test.go b/vendor/github.com/go-kit/kit/endpoint/endpoint_example_test.go new file mode 100644 index 000000000..e95062305 --- /dev/null +++ b/vendor/github.com/go-kit/kit/endpoint/endpoint_example_test.go @@ -0,0 +1,49 @@ +package endpoint_test + +import ( + "context" + "fmt" + + "github.com/go-kit/kit/endpoint" +) + +func ExampleChain() { + e := endpoint.Chain( + annotate("first"), + annotate("second"), + annotate("third"), + )(myEndpoint) + + if _, err := e(ctx, req); err != nil { + panic(err) + } + + // Output: + // first pre + // second pre + // third pre + // my endpoint! + // third post + // second post + // first post +} + +var ( + ctx = context.Background() + req = struct{}{} +) + +func annotate(s string) endpoint.Middleware { + return func(next endpoint.Endpoint) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + fmt.Println(s, "pre") + defer fmt.Println(s, "post") + return next(ctx, request) + } + } +} + +func myEndpoint(context.Context, interface{}) (interface{}, error) { + fmt.Println("my endpoint!") + return struct{}{}, nil +} diff --git a/vendor/github.com/go-kit/kit/examples/README.md b/vendor/github.com/go-kit/kit/examples/README.md new file mode 100644 index 000000000..2891d7886 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/README.md @@ -0,0 +1,5 @@ +# Examples + +For more information about these examples, + including a walkthrough of the stringsvc example, + see [gokit.io/examples](https://gokit.io/examples). diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/README.md b/vendor/github.com/go-kit/kit/examples/addsvc/README.md new file mode 100644 index 000000000..080800609 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/README.md @@ -0,0 +1,17 @@ +# addsvc + +addsvc is an example microservice which takes full advantage of most of Go +kit's features, including both service- and transport-level middlewares, +speaking multiple transports simultaneously, distributed tracing, and rich +error definitions. The server binary is available in cmd/addsvc. The client +binary is available in cmd/addcli. + +Finally, the addtransport package provides both server and clients for each +supported transport. 
The client structs bake-in certain middlewares, in order to +demonstrate the _client library pattern_. But beware: client libraries are +generally a bad idea, because they easily lead to the + [distributed monolith antipattern](https://www.microservices.com/talks/dont-build-a-distributed-monolith/). +If you don't _know_ you need to use one in your organization, it's probably best +avoided: prefer moving that logic to consumers, and relying on + [contract testing](https://docs.pact.io/best_practices/contract_tests_not_functional_tests.html) +to detect incompatibilities. diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addcli/addcli.go b/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addcli/addcli.go new file mode 100644 index 000000000..cedc22216 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addcli/addcli.go @@ -0,0 +1,226 @@ +package main + +import ( + "context" + "flag" + "fmt" + "os" + "strconv" + "text/tabwriter" + "time" + + "google.golang.org/grpc" + + "github.com/apache/thrift/lib/go/thrift" + lightstep "github.com/lightstep/lightstep-tracer-go" + stdopentracing "github.com/opentracing/opentracing-go" + zipkin "github.com/openzipkin/zipkin-go" + zipkinot "github.com/openzipkin/zipkin-go-opentracing" + zipkinhttp "github.com/openzipkin/zipkin-go/reporter/http" + "sourcegraph.com/sourcegraph/appdash" + appdashot "sourcegraph.com/sourcegraph/appdash/opentracing" + + "github.com/go-kit/kit/log" + + "github.com/go-kit/kit/examples/addsvc/pkg/addservice" + "github.com/go-kit/kit/examples/addsvc/pkg/addtransport" + addthrift "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" +) + +func main() { + // The addcli presumes no service discovery system, and expects users to + // provide the direct address of an addsvc. This presumption is reflected in + // the addcli binary and the client packages: the -transport.addr flags + // and various client constructors both expect host:port strings. For an + // example service with a client built on top of a service discovery system, + // see profilesvc. + fs := flag.NewFlagSet("addcli", flag.ExitOnError) + var ( + httpAddr = fs.String("http-addr", "", "HTTP address of addsvc") + grpcAddr = fs.String("grpc-addr", "", "gRPC address of addsvc") + thriftAddr = fs.String("thrift-addr", "", "Thrift address of addsvc") + jsonRPCAddr = fs.String("jsonrpc-addr", "", "JSON RPC address of addsvc") + thriftProtocol = fs.String("thrift-protocol", "binary", "binary, compact, json, simplejson") + thriftBuffer = fs.Int("thrift-buffer", 0, "0 for unbuffered") + thriftFramed = fs.Bool("thrift-framed", false, "true to enable framing") + zipkinV2URL = fs.String("zipkin-url", "", "Enable Zipkin v2 tracing (zipkin-go) via HTTP Reporter URL e.g. http://localhost:94111/api/v2/spans") + zipkinV1URL = fs.String("zipkin-v1-url", "", "Enable Zipkin v1 tracing (zipkin-go-opentracing) via a collector URL e.g. http://localhost:9411/api/v1/spans") + lightstepToken = fs.String("lightstep-token", "", "Enable LightStep tracing via a LightStep access token") + appdashAddr = fs.String("appdash-addr", "", "Enable Appdash tracing via an Appdash server host:port") + method = fs.String("method", "sum", "sum, concat") + ) + fs.Usage = usageFor(fs, os.Args[0]+" [flags] ") + fs.Parse(os.Args[1:]) + if len(fs.Args()) != 2 { + fs.Usage() + os.Exit(1) + } + + // This is a demonstration client, which supports multiple tracers. + // Your clients will probably just use one tracer. 
+ var otTracer stdopentracing.Tracer + { + if *zipkinV1URL != "" && *zipkinV2URL == "" { + collector, err := zipkinot.NewHTTPCollector(*zipkinV1URL) + if err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(1) + } + defer collector.Close() + var ( + debug = false + hostPort = "localhost:0" + serviceName = "addsvc-cli" + ) + recorder := zipkinot.NewRecorder(collector, debug, hostPort, serviceName) + otTracer, err = zipkinot.NewTracer(recorder) + if err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(1) + } + } else if *lightstepToken != "" { + otTracer = lightstep.NewTracer(lightstep.Options{ + AccessToken: *lightstepToken, + }) + defer lightstep.FlushLightStepTracer(otTracer) + } else if *appdashAddr != "" { + otTracer = appdashot.NewTracer(appdash.NewRemoteCollector(*appdashAddr)) + } else { + otTracer = stdopentracing.GlobalTracer() // no-op + } + } + + // This is a demonstration of the native Zipkin tracing client. If using + // Zipkin this is the more idiomatic client over OpenTracing. + var zipkinTracer *zipkin.Tracer + { + var ( + err error + hostPort = "" // if host:port is unknown we can keep this empty + serviceName = "addsvc-cli" + useNoopTracer = (*zipkinV2URL == "") + reporter = zipkinhttp.NewReporter(*zipkinV2URL) + ) + defer reporter.Close() + zEP, _ := zipkin.NewEndpoint(serviceName, hostPort) + zipkinTracer, err = zipkin.NewTracer( + reporter, zipkin.WithLocalEndpoint(zEP), zipkin.WithNoopTracer(useNoopTracer), + ) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to create zipkin tracer: %s\n", err.Error()) + os.Exit(1) + } + } + + // This is a demonstration client, which supports multiple transports. + // Your clients will probably just define and stick with 1 transport. + var ( + svc addservice.Service + err error + ) + if *httpAddr != "" { + svc, err = addtransport.NewHTTPClient(*httpAddr, otTracer, zipkinTracer, log.NewNopLogger()) + } else if *grpcAddr != "" { + conn, err := grpc.Dial(*grpcAddr, grpc.WithInsecure(), grpc.WithTimeout(time.Second)) + if err != nil { + fmt.Fprintf(os.Stderr, "error: %v", err) + os.Exit(1) + } + defer conn.Close() + svc = addtransport.NewGRPCClient(conn, otTracer, zipkinTracer, log.NewNopLogger()) + } else if *jsonRPCAddr != "" { + svc, err = addtransport.NewJSONRPCClient(*jsonRPCAddr, otTracer, log.NewNopLogger()) + } else if *thriftAddr != "" { + // It's necessary to do all of this construction in the func main, + // because (among other reasons) we need to control the lifecycle of the + // Thrift transport, i.e. close it eventually. 
+ var protocolFactory thrift.TProtocolFactory + switch *thriftProtocol { + case "compact": + protocolFactory = thrift.NewTCompactProtocolFactory() + case "simplejson": + protocolFactory = thrift.NewTSimpleJSONProtocolFactory() + case "json": + protocolFactory = thrift.NewTJSONProtocolFactory() + case "binary", "": + protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() + default: + fmt.Fprintf(os.Stderr, "error: invalid protocol %q\n", *thriftProtocol) + os.Exit(1) + } + var transportFactory thrift.TTransportFactory + if *thriftBuffer > 0 { + transportFactory = thrift.NewTBufferedTransportFactory(*thriftBuffer) + } else { + transportFactory = thrift.NewTTransportFactory() + } + if *thriftFramed { + transportFactory = thrift.NewTFramedTransportFactory(transportFactory) + } + transportSocket, err := thrift.NewTSocket(*thriftAddr) + if err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } + transport, err := transportFactory.GetTransport(transportSocket) + if err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } + if err := transport.Open(); err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } + defer transport.Close() + client := addthrift.NewAddServiceClientFactory(transport, protocolFactory) + svc = addtransport.NewThriftClient(client) + } else { + fmt.Fprintf(os.Stderr, "error: no remote address specified\n") + os.Exit(1) + } + if err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } + + switch *method { + case "sum": + a, _ := strconv.ParseInt(fs.Args()[0], 10, 64) + b, _ := strconv.ParseInt(fs.Args()[1], 10, 64) + v, err := svc.Sum(context.Background(), int(a), int(b)) + if err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } + fmt.Fprintf(os.Stdout, "%d + %d = %d\n", a, b, v) + + case "concat": + a := fs.Args()[0] + b := fs.Args()[1] + v, err := svc.Concat(context.Background(), a, b) + if err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } + fmt.Fprintf(os.Stdout, "%q + %q = %q\n", a, b, v) + + default: + fmt.Fprintf(os.Stderr, "error: invalid method %q\n", method) + os.Exit(1) + } +} + +func usageFor(fs *flag.FlagSet, short string) func() { + return func() { + fmt.Fprintf(os.Stderr, "USAGE\n") + fmt.Fprintf(os.Stderr, " %s\n", short) + fmt.Fprintf(os.Stderr, "\n") + fmt.Fprintf(os.Stderr, "FLAGS\n") + w := tabwriter.NewWriter(os.Stderr, 0, 2, 2, ' ', 0) + fs.VisitAll(func(f *flag.Flag) { + fmt.Fprintf(w, "\t-%s %s\t%s\n", f.Name, f.DefValue, f.Usage) + }) + w.Flush() + fmt.Fprintf(os.Stderr, "\n") + } +} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addsvc/addsvc.go b/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addsvc/addsvc.go new file mode 100644 index 000000000..dbd697129 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addsvc/addsvc.go @@ -0,0 +1,322 @@ +package main + +import ( + "flag" + "fmt" + "net" + "net/http" + "os" + "os/signal" + "syscall" + "text/tabwriter" + + "github.com/apache/thrift/lib/go/thrift" + lightstep "github.com/lightstep/lightstep-tracer-go" + "github.com/oklog/oklog/pkg/group" + stdopentracing "github.com/opentracing/opentracing-go" + zipkin "github.com/openzipkin/zipkin-go" + zipkinot "github.com/openzipkin/zipkin-go-opentracing" + zipkinhttp "github.com/openzipkin/zipkin-go/reporter/http" + stdprometheus "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "google.golang.org/grpc" + "sourcegraph.com/sourcegraph/appdash" + 
appdashot "sourcegraph.com/sourcegraph/appdash/opentracing" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/prometheus" + kitgrpc "github.com/go-kit/kit/transport/grpc" + + addpb "github.com/go-kit/kit/examples/addsvc/pb" + "github.com/go-kit/kit/examples/addsvc/pkg/addendpoint" + "github.com/go-kit/kit/examples/addsvc/pkg/addservice" + "github.com/go-kit/kit/examples/addsvc/pkg/addtransport" + addthrift "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" +) + +func main() { + // Define our flags. Your service probably won't need to bind listeners for + // *all* supported transports, or support both Zipkin and LightStep, and so + // on, but we do it here for demonstration purposes. + fs := flag.NewFlagSet("addsvc", flag.ExitOnError) + var ( + debugAddr = fs.String("debug.addr", ":8080", "Debug and metrics listen address") + httpAddr = fs.String("http-addr", ":8081", "HTTP listen address") + grpcAddr = fs.String("grpc-addr", ":8082", "gRPC listen address") + thriftAddr = fs.String("thrift-addr", ":8083", "Thrift listen address") + jsonRPCAddr = fs.String("jsonrpc-addr", ":8084", "JSON RPC listen address") + thriftProtocol = fs.String("thrift-protocol", "binary", "binary, compact, json, simplejson") + thriftBuffer = fs.Int("thrift-buffer", 0, "0 for unbuffered") + thriftFramed = fs.Bool("thrift-framed", false, "true to enable framing") + zipkinV2URL = fs.String("zipkin-url", "", "Enable Zipkin v2 tracing (zipkin-go) using a Reporter URL e.g. http://localhost:9411/api/v2/spans") + zipkinV1URL = fs.String("zipkin-v1-url", "", "Enable Zipkin v1 tracing (zipkin-go-opentracing) using a collector URL e.g. http://localhost:9411/api/v1/spans") + lightstepToken = fs.String("lightstep-token", "", "Enable LightStep tracing via a LightStep access token") + appdashAddr = fs.String("appdash-addr", "", "Enable Appdash tracing via an Appdash server host:port") + ) + fs.Usage = usageFor(fs, os.Args[0]+" [flags]") + fs.Parse(os.Args[1:]) + + // Create a single logger, which we'll use and give to other components. + var logger log.Logger + { + logger = log.NewLogfmtLogger(os.Stderr) + logger = log.With(logger, "ts", log.DefaultTimestampUTC) + logger = log.With(logger, "caller", log.DefaultCaller) + } + + // Determine which OpenTracing tracer to use. We'll pass the tracer to all the + // components that use it, as a dependency. 
+ var tracer stdopentracing.Tracer + { + if *zipkinV1URL != "" && *zipkinV2URL == "" { + logger.Log("tracer", "Zipkin", "type", "OpenTracing", "URL", *zipkinV1URL) + collector, err := zipkinot.NewHTTPCollector(*zipkinV1URL) + if err != nil { + logger.Log("err", err) + os.Exit(1) + } + defer collector.Close() + var ( + debug = false + hostPort = "localhost:80" + serviceName = "addsvc" + ) + recorder := zipkinot.NewRecorder(collector, debug, hostPort, serviceName) + tracer, err = zipkinot.NewTracer(recorder) + if err != nil { + logger.Log("err", err) + os.Exit(1) + } + } else if *lightstepToken != "" { + logger.Log("tracer", "LightStep") // probably don't want to print out the token :) + tracer = lightstep.NewTracer(lightstep.Options{ + AccessToken: *lightstepToken, + }) + defer lightstep.FlushLightStepTracer(tracer) + } else if *appdashAddr != "" { + logger.Log("tracer", "Appdash", "addr", *appdashAddr) + tracer = appdashot.NewTracer(appdash.NewRemoteCollector(*appdashAddr)) + } else { + tracer = stdopentracing.GlobalTracer() // no-op + } + } + + var zipkinTracer *zipkin.Tracer + { + var ( + err error + hostPort = "localhost:80" + serviceName = "addsvc" + useNoopTracer = (*zipkinV2URL == "") + reporter = zipkinhttp.NewReporter(*zipkinV2URL) + ) + defer reporter.Close() + zEP, _ := zipkin.NewEndpoint(serviceName, hostPort) + zipkinTracer, err = zipkin.NewTracer( + reporter, zipkin.WithLocalEndpoint(zEP), zipkin.WithNoopTracer(useNoopTracer), + ) + if err != nil { + logger.Log("err", err) + os.Exit(1) + } + if !useNoopTracer { + logger.Log("tracer", "Zipkin", "type", "Native", "URL", *zipkinV2URL) + } + } + + // Create the (sparse) metrics we'll use in the service. They, too, are + // dependencies that we pass to components that use them. + var ints, chars metrics.Counter + { + // Business-level metrics. + ints = prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: "example", + Subsystem: "addsvc", + Name: "integers_summed", + Help: "Total count of integers summed via the Sum method.", + }, []string{}) + chars = prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: "example", + Subsystem: "addsvc", + Name: "characters_concatenated", + Help: "Total count of characters concatenated via the Concat method.", + }, []string{}) + } + var duration metrics.Histogram + { + // Endpoint-level metrics. + duration = prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ + Namespace: "example", + Subsystem: "addsvc", + Name: "request_duration_seconds", + Help: "Request duration in seconds.", + }, []string{"method", "success"}) + } + http.DefaultServeMux.Handle("/metrics", promhttp.Handler()) + + // Build the layers of the service "onion" from the inside out. First, the + // business logic service; then, the set of endpoints that wrap the service; + // and finally, a series of concrete transport adapters. The adapters, like + // the HTTP handler or the gRPC server, are the bridge between Go kit and + // the interfaces that the transports expect. Note that we're not binding + // them to ports or anything yet; we'll do that next. 
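The ints and chars counters created above are handed to the service constructor in the next block. The addservice package itself is added elsewhere in this patch; purely as a hypothetical sketch of how a go-kit metrics.Counter is consumed (this is not the actual addservice code), a method could increment it like this:

package addexample

import (
	"context"

	"github.com/go-kit/kit/metrics"
)

// instrumentedSum is a hypothetical stand-in for the real service. With the
// CounterOpts above, Prometheus exposes the counter as
// example_addsvc_integers_summed.
type instrumentedSum struct {
	ints metrics.Counter
}

func (s instrumentedSum) Sum(_ context.Context, a, b int) int {
	s.ints.Add(1) // one more call to Sum; labels, if any, are attached via With(...)
	return a + b
}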
+ var ( + service = addservice.New(logger, ints, chars) + endpoints = addendpoint.New(service, logger, duration, tracer, zipkinTracer) + httpHandler = addtransport.NewHTTPHandler(endpoints, tracer, zipkinTracer, logger) + grpcServer = addtransport.NewGRPCServer(endpoints, tracer, zipkinTracer, logger) + thriftServer = addtransport.NewThriftServer(endpoints) + jsonrpcHandler = addtransport.NewJSONRPCHandler(endpoints, logger) + ) + + // Now we're to the part of the func main where we want to start actually + // running things, like servers bound to listeners to receive connections. + // + // The method is the same for each component: add a new actor to the group + // struct, which is a combination of 2 anonymous functions: the first + // function actually runs the component, and the second function should + // interrupt the first function and cause it to return. It's in these + // functions that we actually bind the Go kit server/handler structs to the + // concrete transports and run them. + // + // Putting each component into its own block is mostly for aesthetics: it + // clearly demarcates the scope in which each listener/socket may be used. + var g group.Group + { + // The debug listener mounts the http.DefaultServeMux, and serves up + // stuff like the Prometheus metrics route, the Go debug and profiling + // routes, and so on. + debugListener, err := net.Listen("tcp", *debugAddr) + if err != nil { + logger.Log("transport", "debug/HTTP", "during", "Listen", "err", err) + os.Exit(1) + } + g.Add(func() error { + logger.Log("transport", "debug/HTTP", "addr", *debugAddr) + return http.Serve(debugListener, http.DefaultServeMux) + }, func(error) { + debugListener.Close() + }) + } + { + // The HTTP listener mounts the Go kit HTTP handler we created. + httpListener, err := net.Listen("tcp", *httpAddr) + if err != nil { + logger.Log("transport", "HTTP", "during", "Listen", "err", err) + os.Exit(1) + } + g.Add(func() error { + logger.Log("transport", "HTTP", "addr", *httpAddr) + return http.Serve(httpListener, httpHandler) + }, func(error) { + httpListener.Close() + }) + } + { + // The gRPC listener mounts the Go kit gRPC server we created. + grpcListener, err := net.Listen("tcp", *grpcAddr) + if err != nil { + logger.Log("transport", "gRPC", "during", "Listen", "err", err) + os.Exit(1) + } + g.Add(func() error { + logger.Log("transport", "gRPC", "addr", *grpcAddr) + // we add the Go Kit gRPC Interceptor to our gRPC service as it is used by + // the here demonstrated zipkin tracing middleware. + baseServer := grpc.NewServer(grpc.UnaryInterceptor(kitgrpc.Interceptor)) + addpb.RegisterAddServer(baseServer, grpcServer) + return baseServer.Serve(grpcListener) + }, func(error) { + grpcListener.Close() + }) + } + { + // The Thrift socket mounts the Go kit Thrift server we created earlier. + // There's a lot of boilerplate involved here, related to configuring + // the protocol and transport; blame Thrift. 
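Each of these listener blocks, including the Thrift one below, is an instance of the actor pattern described at the top of this run group. Reduced to a minimal, self-contained sketch (same group package as imported by this file; the listener address is arbitrary), the pattern looks like this:

package main

import (
	"fmt"
	"net"
	"net/http"
	"os"
	"os/signal"
	"syscall"

	"github.com/oklog/oklog/pkg/group"
)

func main() {
	var g group.Group

	// Actor 1: an HTTP server. The first function runs it; the second
	// interrupts it by closing the listener, which makes http.Serve return.
	ln, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	g.Add(func() error {
		return http.Serve(ln, http.NotFoundHandler())
	}, func(error) {
		ln.Close()
	})

	// Actor 2: signal handling, mirroring the final block of this file.
	cancel := make(chan struct{})
	g.Add(func() error {
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
		select {
		case sig := <-c:
			return fmt.Errorf("received signal %s", sig)
		case <-cancel:
			return nil
		}
	}, func(error) {
		close(cancel)
	})

	fmt.Println("exit", g.Run())
}

Run blocks until the first actor returns, then calls every interrupt function, so a closed listener or a delivered signal tears the whole process down in one place.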
+ thriftSocket, err := thrift.NewTServerSocket(*thriftAddr) + if err != nil { + logger.Log("transport", "Thrift", "during", "Listen", "err", err) + os.Exit(1) + } + g.Add(func() error { + logger.Log("transport", "Thrift", "addr", *thriftAddr) + var protocolFactory thrift.TProtocolFactory + switch *thriftProtocol { + case "binary": + protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() + case "compact": + protocolFactory = thrift.NewTCompactProtocolFactory() + case "json": + protocolFactory = thrift.NewTJSONProtocolFactory() + case "simplejson": + protocolFactory = thrift.NewTSimpleJSONProtocolFactory() + default: + return fmt.Errorf("invalid Thrift protocol %q", *thriftProtocol) + } + var transportFactory thrift.TTransportFactory + if *thriftBuffer > 0 { + transportFactory = thrift.NewTBufferedTransportFactory(*thriftBuffer) + } else { + transportFactory = thrift.NewTTransportFactory() + } + if *thriftFramed { + transportFactory = thrift.NewTFramedTransportFactory(transportFactory) + } + return thrift.NewTSimpleServer4( + addthrift.NewAddServiceProcessor(thriftServer), + thriftSocket, + transportFactory, + protocolFactory, + ).Serve() + }, func(error) { + thriftSocket.Close() + }) + } + { + httpListener, err := net.Listen("tcp", *jsonRPCAddr) + if err != nil { + logger.Log("transport", "JSONRPC over HTTP", "during", "Listen", "err", err) + os.Exit(1) + } + g.Add(func() error { + logger.Log("transport", "JSONRPC over HTTP", "addr", *jsonRPCAddr) + return http.Serve(httpListener, jsonrpcHandler) + }, func(error) { + httpListener.Close() + }) + } + { + // This function just sits and waits for ctrl-C. + cancelInterrupt := make(chan struct{}) + g.Add(func() error { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) + select { + case sig := <-c: + return fmt.Errorf("received signal %s", sig) + case <-cancelInterrupt: + return nil + } + }, func(error) { + close(cancelInterrupt) + }) + } + logger.Log("exit", g.Run()) +} + +func usageFor(fs *flag.FlagSet, short string) func() { + return func() { + fmt.Fprintf(os.Stderr, "USAGE\n") + fmt.Fprintf(os.Stderr, " %s\n", short) + fmt.Fprintf(os.Stderr, "\n") + fmt.Fprintf(os.Stderr, "FLAGS\n") + w := tabwriter.NewWriter(os.Stderr, 0, 2, 2, ' ', 0) + fs.VisitAll(func(f *flag.Flag) { + fmt.Fprintf(w, "\t-%s %s\t%s\n", f.Name, f.DefValue, f.Usage) + }) + w.Flush() + fmt.Fprintf(os.Stderr, "\n") + } +} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addsvc/pact_test.go b/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addsvc/pact_test.go new file mode 100644 index 000000000..2c709b58b --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addsvc/pact_test.go @@ -0,0 +1,55 @@ +package main + +import ( + "fmt" + "net/http" + "os" + "strings" + "testing" + + "github.com/pact-foundation/pact-go/dsl" +) + +func TestPactStringsvcUppercase(t *testing.T) { + if os.Getenv("WRITE_PACTS") == "" { + t.Skip("skipping Pact contracts; set WRITE_PACTS environment variable to enable") + } + + pact := dsl.Pact{ + Port: 6666, + Consumer: "addsvc", + Provider: "stringsvc", + } + defer pact.Teardown() + + pact.AddInteraction(). + UponReceiving("stringsvc uppercase"). + WithRequest(dsl.Request{ + Headers: map[string]string{"Content-Type": "application/json; charset=utf-8"}, + Method: "POST", + Path: "/uppercase", + Body: `{"s":"foo"}`, + }). 
+ WillRespondWith(dsl.Response{ + Status: 200, + Headers: map[string]string{"Content-Type": "application/json; charset=utf-8"}, + Body: `{"v":"FOO"}`, + }) + + if err := pact.Verify(func() error { + u := fmt.Sprintf("http://localhost:%d/uppercase", pact.Server.Port) + req, err := http.NewRequest("POST", u, strings.NewReader(`{"s":"foo"}`)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json; charset=utf-8") + if _, err = http.DefaultClient.Do(req); err != nil { + return err + } + return nil + }); err != nil { + t.Fatal(err) + } + + pact.WritePact() +} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addsvc/wiring_test.go b/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addsvc/wiring_test.go new file mode 100644 index 000000000..f9b3551b2 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/cmd/addsvc/wiring_test.go @@ -0,0 +1,42 @@ +package main + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/opentracing/opentracing-go" + zipkin "github.com/openzipkin/zipkin-go" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/metrics/discard" + + "github.com/go-kit/kit/examples/addsvc/pkg/addendpoint" + "github.com/go-kit/kit/examples/addsvc/pkg/addservice" + "github.com/go-kit/kit/examples/addsvc/pkg/addtransport" +) + +func TestHTTP(t *testing.T) { + zkt, _ := zipkin.NewTracer(nil, zipkin.WithNoopTracer(true)) + svc := addservice.New(log.NewNopLogger(), discard.NewCounter(), discard.NewCounter()) + eps := addendpoint.New(svc, log.NewNopLogger(), discard.NewHistogram(), opentracing.GlobalTracer(), zkt) + mux := addtransport.NewHTTPHandler(eps, opentracing.GlobalTracer(), zkt, log.NewNopLogger()) + srv := httptest.NewServer(mux) + defer srv.Close() + + for _, testcase := range []struct { + method, url, body, want string + }{ + {"GET", srv.URL + "/concat", `{"a":"1","b":"2"}`, `{"v":"12"}`}, + {"GET", srv.URL + "/sum", `{"a":1,"b":2}`, `{"v":3}`}, + } { + req, _ := http.NewRequest(testcase.method, testcase.url, strings.NewReader(testcase.body)) + resp, _ := http.DefaultClient.Do(req) + body, _ := ioutil.ReadAll(resp.Body) + if want, have := testcase.want, strings.TrimSpace(string(body)); want != have { + t.Errorf("%s %s %s: want %q, have %q", testcase.method, testcase.url, testcase.body, want, have) + } + } +} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/pb/addsvc.pb.go b/vendor/github.com/go-kit/kit/examples/addsvc/pb/addsvc.pb.go new file mode 100644 index 000000000..781b50b75 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/pb/addsvc.pb.go @@ -0,0 +1,270 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: addsvc.proto + +/* +Package pb is a generated protocol buffer package. + +It is generated from these files: + addsvc.proto + +It has these top-level messages: + SumRequest + SumReply + ConcatRequest + ConcatReply +*/ +package pb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
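The pact_test.go above writes an addsvc-to-stringsvc pact file when WRITE_PACTS is set. A hedged sketch of the matching provider-side verification, using pact-go's VerifyProvider with an assumed stringsvc address and pact file location (neither is part of this patch), might look like:

package main

import (
	"path/filepath"
	"testing"

	"github.com/pact-foundation/pact-go/dsl"
	"github.com/pact-foundation/pact-go/types"
)

func TestStringsvcHonoursAddsvcPact(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping provider verification in short mode")
	}

	// Assumes stringsvc is already listening on localhost:8080 and that the
	// consumer test wrote its pact file into ./pacts; both are illustrative.
	pactFile, err := filepath.Abs(filepath.Join("pacts", "addsvc-stringsvc.json"))
	if err != nil {
		t.Fatal(err)
	}

	pact := dsl.Pact{Provider: "stringsvc"}
	pact.VerifyProvider(types.VerifyRequest{
		ProviderBaseURL: "http://localhost:8080",
		PactURLs:        []string{pactFile},
	}) // return values are ignored here because they differ between pact-go versions
}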
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The sum request contains two parameters. +type SumRequest struct { + A int64 `protobuf:"varint,1,opt,name=a" json:"a,omitempty"` + B int64 `protobuf:"varint,2,opt,name=b" json:"b,omitempty"` +} + +func (m *SumRequest) Reset() { *m = SumRequest{} } +func (m *SumRequest) String() string { return proto.CompactTextString(m) } +func (*SumRequest) ProtoMessage() {} +func (*SumRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *SumRequest) GetA() int64 { + if m != nil { + return m.A + } + return 0 +} + +func (m *SumRequest) GetB() int64 { + if m != nil { + return m.B + } + return 0 +} + +// The sum response contains the result of the calculation. +type SumReply struct { + V int64 `protobuf:"varint,1,opt,name=v" json:"v,omitempty"` + Err string `protobuf:"bytes,2,opt,name=err" json:"err,omitempty"` +} + +func (m *SumReply) Reset() { *m = SumReply{} } +func (m *SumReply) String() string { return proto.CompactTextString(m) } +func (*SumReply) ProtoMessage() {} +func (*SumReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *SumReply) GetV() int64 { + if m != nil { + return m.V + } + return 0 +} + +func (m *SumReply) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + +// The Concat request contains two parameters. +type ConcatRequest struct { + A string `protobuf:"bytes,1,opt,name=a" json:"a,omitempty"` + B string `protobuf:"bytes,2,opt,name=b" json:"b,omitempty"` +} + +func (m *ConcatRequest) Reset() { *m = ConcatRequest{} } +func (m *ConcatRequest) String() string { return proto.CompactTextString(m) } +func (*ConcatRequest) ProtoMessage() {} +func (*ConcatRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ConcatRequest) GetA() string { + if m != nil { + return m.A + } + return "" +} + +func (m *ConcatRequest) GetB() string { + if m != nil { + return m.B + } + return "" +} + +// The Concat response contains the result of the concatenation. +type ConcatReply struct { + V string `protobuf:"bytes,1,opt,name=v" json:"v,omitempty"` + Err string `protobuf:"bytes,2,opt,name=err" json:"err,omitempty"` +} + +func (m *ConcatReply) Reset() { *m = ConcatReply{} } +func (m *ConcatReply) String() string { return proto.CompactTextString(m) } +func (*ConcatReply) ProtoMessage() {} +func (*ConcatReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ConcatReply) GetV() string { + if m != nil { + return m.V + } + return "" +} + +func (m *ConcatReply) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + +func init() { + proto.RegisterType((*SumRequest)(nil), "pb.SumRequest") + proto.RegisterType((*SumReply)(nil), "pb.SumReply") + proto.RegisterType((*ConcatRequest)(nil), "pb.ConcatRequest") + proto.RegisterType((*ConcatReply)(nil), "pb.ConcatReply") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Add service + +type AddClient interface { + // Sums two integers. 
+ Sum(ctx context.Context, in *SumRequest, opts ...grpc.CallOption) (*SumReply, error) + // Concatenates two strings + Concat(ctx context.Context, in *ConcatRequest, opts ...grpc.CallOption) (*ConcatReply, error) +} + +type addClient struct { + cc *grpc.ClientConn +} + +func NewAddClient(cc *grpc.ClientConn) AddClient { + return &addClient{cc} +} + +func (c *addClient) Sum(ctx context.Context, in *SumRequest, opts ...grpc.CallOption) (*SumReply, error) { + out := new(SumReply) + err := grpc.Invoke(ctx, "/pb.Add/Sum", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *addClient) Concat(ctx context.Context, in *ConcatRequest, opts ...grpc.CallOption) (*ConcatReply, error) { + out := new(ConcatReply) + err := grpc.Invoke(ctx, "/pb.Add/Concat", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Add service + +type AddServer interface { + // Sums two integers. + Sum(context.Context, *SumRequest) (*SumReply, error) + // Concatenates two strings + Concat(context.Context, *ConcatRequest) (*ConcatReply, error) +} + +func RegisterAddServer(s *grpc.Server, srv AddServer) { + s.RegisterService(&_Add_serviceDesc, srv) +} + +func _Add_Sum_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SumRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AddServer).Sum(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Add/Sum", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AddServer).Sum(ctx, req.(*SumRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Add_Concat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ConcatRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AddServer).Concat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Add/Concat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AddServer).Concat(ctx, req.(*ConcatRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Add_serviceDesc = grpc.ServiceDesc{ + ServiceName: "pb.Add", + HandlerType: (*AddServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Sum", + Handler: _Add_Sum_Handler, + }, + { + MethodName: "Concat", + Handler: _Add_Concat_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "addsvc.proto", +} + +func init() { proto.RegisterFile("addsvc.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 189 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0x4c, 0x49, 0x29, + 0x2e, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2a, 0x48, 0x52, 0xd2, 0xe0, 0xe2, + 0x0a, 0x2e, 0xcd, 0x0d, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0xe2, 0xe1, 0x62, 0x4c, 0x94, + 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0e, 0x62, 0x4c, 0x04, 0xf1, 0x92, 0x24, 0x98, 0x20, 0xbc, 0x24, + 0x25, 0x2d, 0x2e, 0x0e, 0xb0, 0xca, 0x82, 0x9c, 0x4a, 0x90, 0x4c, 0x19, 0x4c, 0x5d, 0x99, 0x90, + 0x00, 0x17, 0x73, 0x6a, 0x51, 0x11, 0x58, 0x25, 0x67, 0x10, 0x88, 0xa9, 0xa4, 0xcd, 0xc5, 0xeb, + 0x9c, 0x9f, 0x97, 0x9c, 0x58, 0x82, 0x61, 0x30, 0x27, 0x8a, 0xc1, 0x9c, 0x20, 0x83, 0x75, 0xb9, + 0xb8, 
0x61, 0x8a, 0x51, 0xcc, 0xe6, 0xc4, 0x6a, 0xb6, 0x51, 0x0c, 0x17, 0xb3, 0x63, 0x4a, 0x8a, + 0x90, 0x2a, 0x17, 0x73, 0x70, 0x69, 0xae, 0x10, 0x9f, 0x5e, 0x41, 0x92, 0x1e, 0xc2, 0x07, 0x52, + 0x3c, 0x70, 0x7e, 0x41, 0x4e, 0xa5, 0x12, 0x83, 0x90, 0x1e, 0x17, 0x1b, 0xc4, 0x70, 0x21, 0x41, + 0x90, 0x0c, 0x8a, 0xab, 0xa4, 0xf8, 0x91, 0x85, 0xc0, 0xea, 0x93, 0xd8, 0xc0, 0x41, 0x63, 0x0c, + 0x08, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x37, 0x81, 0x99, 0x2a, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/pb/addsvc.proto b/vendor/github.com/go-kit/kit/examples/addsvc/pb/addsvc.proto new file mode 100644 index 000000000..cf61532f3 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/pb/addsvc.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package pb; + +// The Add service definition. +service Add { + // Sums two integers. + rpc Sum (SumRequest) returns (SumReply) {} + + // Concatenates two strings + rpc Concat (ConcatRequest) returns (ConcatReply) {} +} + +// The sum request contains two parameters. +message SumRequest { + int64 a = 1; + int64 b = 2; +} + +// The sum response contains the result of the calculation. +message SumReply { + int64 v = 1; + string err = 2; +} + +// The Concat request contains two parameters. +message ConcatRequest { + string a = 1; + string b = 2; +} + +// The Concat response contains the result of the concatenation. +message ConcatReply { + string v = 1; + string err = 2; +} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/pb/compile.sh b/vendor/github.com/go-kit/kit/examples/addsvc/pb/compile.sh new file mode 100755 index 000000000..c0268442a --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/pb/compile.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env sh + +# Install proto3 from source +# brew install autoconf automake libtool +# git clone https://github.com/google/protobuf +# ./autogen.sh ; ./configure ; make ; make install +# +# Update protoc Go bindings via +# go get -u github.com/golang/protobuf/{proto,protoc-gen-go} +# +# See also +# https://github.com/grpc/grpc-go/tree/master/examples + +protoc addsvc.proto --go_out=plugins=grpc:. diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/addsvc.thrift b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/addsvc.thrift new file mode 100644 index 000000000..e67ce1b21 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/addsvc.thrift @@ -0,0 +1,14 @@ +struct SumReply { + 1: i64 value + 2: string err +} + +struct ConcatReply { + 1: string value + 2: string err +} + +service AddService { + SumReply Sum(1: i64 a, 2: i64 b) + ConcatReply Concat(1: string a, 2: string b) +} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/compile.sh b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/compile.sh new file mode 100755 index 000000000..293e68e0e --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/compile.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env sh + +# See also https://thrift.apache.org/tutorial/go. +# +# An old version can be obtained via `brew install thrift`. 
+# For the latest, here's the annoying dance: +# +# brew install automake bison pkg-config openssl +# ln -s /usr/local/opt/openssl/include/openssl /usr/local/include # if it isn't already +# git clone git@github.com:apache/thrift +# ./bootstrap.sh +# bash +# export PATH=/usr/local/Cellar/bison/*/bin:$PATH +# ./configure ./configure --without-qt4 --without-qt5 --without-c_glib --without-csharp --without-java --without-erlang --without-nodejs --without-lua --without-python --without-perl --without-php --without-php_extension --without-dart --without-ruby --without-haskell --without-rs --without-cl --without-haxe --without-dotnetcore --without-d +# make +# sudo make install + +thrift -r --gen "go:package_prefix=github.com/go-kit/kit/examples/addsvc/thrift/gen-go/,thrift_import=github.com/apache/thrift/lib/go/thrift" addsvc.thrift diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/GoUnusedProtection__.go b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/GoUnusedProtection__.go new file mode 100644 index 000000000..88387e148 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/GoUnusedProtection__.go @@ -0,0 +1,7 @@ +// Autogenerated by Thrift Compiler (1.0.0-dev) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package addsvc + +var GoUnusedProtection__ int; + diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/add_service-remote/add_service-remote.go b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/add_service-remote/add_service-remote.go new file mode 100755 index 000000000..9e6800e72 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/add_service-remote/add_service-remote.go @@ -0,0 +1,186 @@ +// Autogenerated by Thrift Compiler (1.0.0-dev) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package main + +import ( + "context" + "flag" + "fmt" + "math" + "net" + "net/url" + "os" + "strconv" + "strings" + "github.com/apache/thrift/lib/go/thrift" + "github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc" +) + + +func Usage() { + fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") + flag.PrintDefaults() + fmt.Fprintln(os.Stderr, "\nFunctions:") + fmt.Fprintln(os.Stderr, " SumReply Sum(i64 a, i64 b)") + fmt.Fprintln(os.Stderr, " ConcatReply Concat(string a, string b)") + fmt.Fprintln(os.Stderr) + os.Exit(0) +} + +type httpHeaders map[string]string + +func (h httpHeaders) String() string { + var m map[string]string = h + return fmt.Sprintf("%s", m) +} + +func (h httpHeaders) Set(value string) error { + parts := strings.Split(value, ": ") + if len(parts) != 2 { + return fmt.Errorf("header should be of format 'Key: Value'") + } + h[parts[0]] = parts[1] + return nil +} + +func main() { + flag.Usage = Usage + var host string + var port int + var protocol string + var urlString string + var framed bool + var useHttp bool + headers := make(httpHeaders) + var parsedUrl *url.URL + var trans thrift.TTransport + _ = strconv.Atoi + _ = math.Abs + flag.Usage = Usage + flag.StringVar(&host, "h", "localhost", "Specify host and port") + flag.IntVar(&port, "p", 9090, "Specify port") + flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") + flag.StringVar(&urlString, "u", "", "Specify the url") + flag.BoolVar(&framed, "framed", false, "Use framed transport") + flag.BoolVar(&useHttp, "http", false, 
"Use http") + flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")") + flag.Parse() + + if len(urlString) > 0 { + var err error + parsedUrl, err = url.Parse(urlString) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + host = parsedUrl.Host + useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https" + } else if useHttp { + _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + } + + cmd := flag.Arg(0) + var err error + if useHttp { + trans, err = thrift.NewTHttpClient(parsedUrl.String()) + if len(headers) > 0 { + httptrans := trans.(*thrift.THttpClient) + for key, value := range headers { + httptrans.SetHeader(key, value) + } + } + } else { + portStr := fmt.Sprint(port) + if strings.Contains(host, ":") { + host, portStr, err = net.SplitHostPort(host) + if err != nil { + fmt.Fprintln(os.Stderr, "error with host:", err) + os.Exit(1) + } + } + trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) + if err != nil { + fmt.Fprintln(os.Stderr, "error resolving address:", err) + os.Exit(1) + } + if framed { + trans = thrift.NewTFramedTransport(trans) + } + } + if err != nil { + fmt.Fprintln(os.Stderr, "Error creating transport", err) + os.Exit(1) + } + defer trans.Close() + var protocolFactory thrift.TProtocolFactory + switch protocol { + case "compact": + protocolFactory = thrift.NewTCompactProtocolFactory() + break + case "simplejson": + protocolFactory = thrift.NewTSimpleJSONProtocolFactory() + break + case "json": + protocolFactory = thrift.NewTJSONProtocolFactory() + break + case "binary", "": + protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() + break + default: + fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) + Usage() + os.Exit(1) + } + iprot := protocolFactory.GetProtocol(trans) + oprot := protocolFactory.GetProtocol(trans) + client := addsvc.NewAddServiceClient(thrift.NewTStandardClient(iprot, oprot)) + if err := trans.Open(); err != nil { + fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) + os.Exit(1) + } + + switch cmd { + case "Sum": + if flag.NArg() - 1 != 2 { + fmt.Fprintln(os.Stderr, "Sum requires 2 args") + flag.Usage() + } + argvalue0, err6 := (strconv.ParseInt(flag.Arg(1), 10, 64)) + if err6 != nil { + Usage() + return + } + value0 := argvalue0 + argvalue1, err7 := (strconv.ParseInt(flag.Arg(2), 10, 64)) + if err7 != nil { + Usage() + return + } + value1 := argvalue1 + fmt.Print(client.Sum(context.Background(), value0, value1)) + fmt.Print("\n") + break + case "Concat": + if flag.NArg() - 1 != 2 { + fmt.Fprintln(os.Stderr, "Concat requires 2 args") + flag.Usage() + } + argvalue0 := flag.Arg(1) + value0 := argvalue0 + argvalue1 := flag.Arg(2) + value1 := argvalue1 + fmt.Print(client.Concat(context.Background(), value0, value1)) + fmt.Print("\n") + break + case "": + Usage() + break + default: + fmt.Fprintln(os.Stderr, "Invalid function ", cmd) + } +} diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/addsvc-consts.go b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/addsvc-consts.go new file mode 100644 index 000000000..eea54fe19 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/addsvc-consts.go @@ -0,0 +1,24 @@ +// Autogenerated by Thrift Compiler (1.0.0-dev) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE 
DOING + +package addsvc + +import ( + "bytes" + "context" + "reflect" + "fmt" + "github.com/apache/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + + +func init() { +} + diff --git a/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/addsvc.go b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/addsvc.go new file mode 100644 index 000000000..14a87e6d2 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/addsvc/thrift/gen-go/addsvc/addsvc.go @@ -0,0 +1,935 @@ +// Autogenerated by Thrift Compiler (1.0.0-dev) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package addsvc + +import ( + "bytes" + "context" + "reflect" + "fmt" + "github.com/apache/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +// Attributes: +// - Value +// - Err +type SumReply struct { + Value int64 `thrift:"value,1" db:"value" json:"value"` + Err string `thrift:"err,2" db:"err" json:"err"` +} + +func NewSumReply() *SumReply { + return &SumReply{} +} + + +func (p *SumReply) GetValue() int64 { + return p.Value +} + +func (p *SumReply) GetErr() string { + return p.Err +} +func (p *SumReply) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *SumReply) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Value = v +} + return nil +} + +func (p *SumReply) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Err = v +} + return nil +} + +func (p *SumReply) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("SumReply"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(oprot); err != nil { return err } + if err := p.writeField2(oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + 
return nil +} + +func (p *SumReply) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err) } + if err := oprot.WriteI64(int64(p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err) } + return err +} + +func (p *SumReply) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:err: ", p), err) } + if err := oprot.WriteString(string(p.Err)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.err (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:err: ", p), err) } + return err +} + +func (p *SumReply) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SumReply(%+v)", *p) +} + +// Attributes: +// - Value +// - Err +type ConcatReply struct { + Value string `thrift:"value,1" db:"value" json:"value"` + Err string `thrift:"err,2" db:"err" json:"err"` +} + +func NewConcatReply() *ConcatReply { + return &ConcatReply{} +} + + +func (p *ConcatReply) GetValue() string { + return p.Value +} + +func (p *ConcatReply) GetErr() string { + return p.Err +} +func (p *ConcatReply) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConcatReply) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Value = v +} + return nil +} + +func (p *ConcatReply) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Err = v +} + return nil +} + +func (p *ConcatReply) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ConcatReply"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(oprot); err != nil { return err } + if err := p.writeField2(oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write 
field stop error: ", err) } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *ConcatReply) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:value: ", p), err) } + if err := oprot.WriteString(string(p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:value: ", p), err) } + return err +} + +func (p *ConcatReply) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:err: ", p), err) } + if err := oprot.WriteString(string(p.Err)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.err (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:err: ", p), err) } + return err +} + +func (p *ConcatReply) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConcatReply(%+v)", *p) +} + +type AddService interface { + // Parameters: + // - A + // - B + Sum(ctx context.Context, a int64, b int64) (r *SumReply, err error) + // Parameters: + // - A + // - B + Concat(ctx context.Context, a string, b string) (r *ConcatReply, err error) +} + +type AddServiceClient struct { + c thrift.TClient +} + +func NewAddServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AddServiceClient { + return &AddServiceClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewAddServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AddServiceClient { + return &AddServiceClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewAddServiceClient(c thrift.TClient) *AddServiceClient { + return &AddServiceClient{ + c: c, + } +} + +func (p *AddServiceClient) Client_() thrift.TClient { + return p.c +} +// Parameters: +// - A +// - B +func (p *AddServiceClient) Sum(ctx context.Context, a int64, b int64) (r *SumReply, err error) { + var _args0 AddServiceSumArgs + _args0.A = a + _args0.B = b + var _result1 AddServiceSumResult + if err = p.Client_().Call(ctx, "Sum", &_args0, &_result1); err != nil { + return + } + return _result1.GetSuccess(), nil +} + +// Parameters: +// - A +// - B +func (p *AddServiceClient) Concat(ctx context.Context, a string, b string) (r *ConcatReply, err error) { + var _args2 AddServiceConcatArgs + _args2.A = a + _args2.B = b + var _result3 AddServiceConcatResult + if err = p.Client_().Call(ctx, "Concat", &_args2, &_result3); err != nil { + return + } + return _result3.GetSuccess(), nil +} + +type AddServiceProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler AddService +} + +func (p *AddServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *AddServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *AddServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func 
NewAddServiceProcessor(handler AddService) *AddServiceProcessor { + + self4 := &AddServiceProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} + self4.processorMap["Sum"] = &addServiceProcessorSum{handler:handler} + self4.processorMap["Concat"] = &addServiceProcessorConcat{handler:handler} +return self4 +} + +func (p *AddServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { return false, err } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x5.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x5 + +} + +type addServiceProcessorSum struct { + handler AddService +} + +func (p *addServiceProcessorSum) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AddServiceSumArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("Sum", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AddServiceSumResult{} +var retval *SumReply + var err2 error + if retval, err2 = p.handler.Sum(ctx, args.A, args.B); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing Sum: " + err2.Error()) + oprot.WriteMessageBegin("Sum", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval +} + if err2 = oprot.WriteMessageBegin("Sum", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type addServiceProcessorConcat struct { + handler AddService +} + +func (p *addServiceProcessorConcat) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AddServiceConcatArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("Concat", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AddServiceConcatResult{} +var retval *ConcatReply + var err2 error + if retval, err2 = p.handler.Concat(ctx, args.A, args.B); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing Concat: " + err2.Error()) + oprot.WriteMessageBegin("Concat", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval +} + if err2 = oprot.WriteMessageBegin("Concat", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = 
oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - A +// - B +type AddServiceSumArgs struct { + A int64 `thrift:"a,1" db:"a" json:"a"` + B int64 `thrift:"b,2" db:"b" json:"b"` +} + +func NewAddServiceSumArgs() *AddServiceSumArgs { + return &AddServiceSumArgs{} +} + + +func (p *AddServiceSumArgs) GetA() int64 { + return p.A +} + +func (p *AddServiceSumArgs) GetB() int64 { + return p.B +} +func (p *AddServiceSumArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AddServiceSumArgs) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.A = v +} + return nil +} + +func (p *AddServiceSumArgs) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.B = v +} + return nil +} + +func (p *AddServiceSumArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Sum_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(oprot); err != nil { return err } + if err := p.writeField2(oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *AddServiceSumArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("a", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:a: ", p), err) } + if err := oprot.WriteI64(int64(p.A)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.a (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:a: ", p), err) } + return err +} + +func (p *AddServiceSumArgs) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("b", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:b: ", p), err) } + if err := oprot.WriteI64(int64(p.B)); err != nil 
{ + return thrift.PrependError(fmt.Sprintf("%T.b (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:b: ", p), err) } + return err +} + +func (p *AddServiceSumArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AddServiceSumArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AddServiceSumResult struct { + Success *SumReply `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAddServiceSumResult() *AddServiceSumResult { + return &AddServiceSumResult{} +} + +var AddServiceSumResult_Success_DEFAULT *SumReply +func (p *AddServiceSumResult) GetSuccess() *SumReply { + if !p.IsSetSuccess() { + return AddServiceSumResult_Success_DEFAULT + } +return p.Success +} +func (p *AddServiceSumResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AddServiceSumResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AddServiceSumResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &SumReply{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AddServiceSumResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Sum_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *AddServiceSumResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *AddServiceSumResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AddServiceSumResult(%+v)", *p) +} + +// Attributes: +// - A +// - B +type AddServiceConcatArgs struct { + A string `thrift:"a,1" db:"a" json:"a"` + B string `thrift:"b,2" db:"b" json:"b"` +} + +func NewAddServiceConcatArgs() *AddServiceConcatArgs { + 
return &AddServiceConcatArgs{} +} + + +func (p *AddServiceConcatArgs) GetA() string { + return p.A +} + +func (p *AddServiceConcatArgs) GetB() string { + return p.B +} +func (p *AddServiceConcatArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AddServiceConcatArgs) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.A = v +} + return nil +} + +func (p *AddServiceConcatArgs) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.B = v +} + return nil +} + +func (p *AddServiceConcatArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Concat_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(oprot); err != nil { return err } + if err := p.writeField2(oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *AddServiceConcatArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("a", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:a: ", p), err) } + if err := oprot.WriteString(string(p.A)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.a (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:a: ", p), err) } + return err +} + +func (p *AddServiceConcatArgs) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("b", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:b: ", p), err) } + if err := oprot.WriteString(string(p.B)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.b (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:b: ", p), err) } + return err +} + +func (p *AddServiceConcatArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AddServiceConcatArgs(%+v)", *p) +} + +// 
Attributes: +// - Success +type AddServiceConcatResult struct { + Success *ConcatReply `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAddServiceConcatResult() *AddServiceConcatResult { + return &AddServiceConcatResult{} +} + +var AddServiceConcatResult_Success_DEFAULT *ConcatReply +func (p *AddServiceConcatResult) GetSuccess() *ConcatReply { + if !p.IsSetSuccess() { + return AddServiceConcatResult_Success_DEFAULT + } +return p.Success +} +func (p *AddServiceConcatResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AddServiceConcatResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AddServiceConcatResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConcatReply{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AddServiceConcatResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Concat_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *AddServiceConcatResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *AddServiceConcatResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AddServiceConcatResult(%+v)", *p) +} + + diff --git a/vendor/github.com/go-kit/kit/examples/apigateway/main.go b/vendor/github.com/go-kit/kit/examples/apigateway/main.go new file mode 100644 index 000000000..5891241d1 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/apigateway/main.go @@ -0,0 +1,287 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/signal" + "strings" + "syscall" + "time" + + consulsd "github.com/go-kit/kit/sd/consul" + "github.com/gorilla/mux" + 
"github.com/hashicorp/consul/api" + stdopentracing "github.com/opentracing/opentracing-go" + stdzipkin "github.com/openzipkin/zipkin-go" + "google.golang.org/grpc" + + "github.com/go-kit/kit/endpoint" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/sd" + "github.com/go-kit/kit/sd/lb" + httptransport "github.com/go-kit/kit/transport/http" + + "github.com/go-kit/kit/examples/addsvc/pkg/addendpoint" + "github.com/go-kit/kit/examples/addsvc/pkg/addservice" + "github.com/go-kit/kit/examples/addsvc/pkg/addtransport" +) + +func main() { + var ( + httpAddr = flag.String("http.addr", ":8000", "Address for HTTP (JSON) server") + consulAddr = flag.String("consul.addr", "", "Consul agent address") + retryMax = flag.Int("retry.max", 3, "per-request retries to different instances") + retryTimeout = flag.Duration("retry.timeout", 500*time.Millisecond, "per-request timeout, including retries") + ) + flag.Parse() + + // Logging domain. + var logger log.Logger + { + logger = log.NewLogfmtLogger(os.Stderr) + logger = log.With(logger, "ts", log.DefaultTimestampUTC) + logger = log.With(logger, "caller", log.DefaultCaller) + } + + // Service discovery domain. In this example we use Consul. + var client consulsd.Client + { + consulConfig := api.DefaultConfig() + if len(*consulAddr) > 0 { + consulConfig.Address = *consulAddr + } + consulClient, err := api.NewClient(consulConfig) + if err != nil { + logger.Log("err", err) + os.Exit(1) + } + client = consulsd.NewClient(consulClient) + } + + // Transport domain. + tracer := stdopentracing.GlobalTracer() // no-op + zipkinTracer, _ := stdzipkin.NewTracer(nil, stdzipkin.WithNoopTracer(true)) + ctx := context.Background() + r := mux.NewRouter() + + // Now we begin installing the routes. Each route corresponds to a single + // method: sum, concat, uppercase, and count. + + // addsvc routes. + { + // Each method gets constructed with a factory. Factories take an + // instance string, and return a specific endpoint. In the factory we + // dial the instance string we get from Consul, and then leverage an + // addsvc client package to construct a complete service. We can then + // leverage the addsvc.Make{Sum,Concat}Endpoint constructors to convert + // the complete service to specific endpoint. + var ( + tags = []string{} + passingOnly = true + endpoints = addendpoint.Set{} + instancer = consulsd.NewInstancer(client, logger, "addsvc", tags, passingOnly) + ) + { + factory := addsvcFactory(addendpoint.MakeSumEndpoint, tracer, zipkinTracer, logger) + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(*retryMax, *retryTimeout, balancer) + endpoints.SumEndpoint = retry + } + { + factory := addsvcFactory(addendpoint.MakeConcatEndpoint, tracer, zipkinTracer, logger) + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(*retryMax, *retryTimeout, balancer) + endpoints.ConcatEndpoint = retry + } + + // Here we leverage the fact that addsvc comes with a constructor for an + // HTTP handler, and just install it under a particular path prefix in + // our router. + + r.PathPrefix("/addsvc").Handler(http.StripPrefix("/addsvc", addtransport.NewHTTPHandler(endpoints, tracer, zipkinTracer, logger))) + } + + // stringsvc routes. + { + // addsvc had lots of nice importable Go packages we could leverage. + // With stringsvc we are not so fortunate, it just has some endpoints + // that we assume will exist. So we have to write that logic here. 
This + // is by design, so you can see two totally different methods of + // proxying to a remote service. + + var ( + tags = []string{} + passingOnly = true + uppercase endpoint.Endpoint + count endpoint.Endpoint + instancer = consulsd.NewInstancer(client, logger, "stringsvc", tags, passingOnly) + ) + { + factory := stringsvcFactory(ctx, "GET", "/uppercase") + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(*retryMax, *retryTimeout, balancer) + uppercase = retry + } + { + factory := stringsvcFactory(ctx, "GET", "/count") + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(*retryMax, *retryTimeout, balancer) + count = retry + } + + // We can use the transport/http.Server to act as our handler, all we + // have to do provide it with the encode and decode functions for our + // stringsvc methods. + + r.Handle("/stringsvc/uppercase", httptransport.NewServer(uppercase, decodeUppercaseRequest, encodeJSONResponse)) + r.Handle("/stringsvc/count", httptransport.NewServer(count, decodeCountRequest, encodeJSONResponse)) + } + + // Interrupt handler. + errc := make(chan error) + go func() { + c := make(chan os.Signal) + signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) + errc <- fmt.Errorf("%s", <-c) + }() + + // HTTP transport. + go func() { + logger.Log("transport", "HTTP", "addr", *httpAddr) + errc <- http.ListenAndServe(*httpAddr, r) + }() + + // Run! + logger.Log("exit", <-errc) +} + +func addsvcFactory(makeEndpoint func(addservice.Service) endpoint.Endpoint, tracer stdopentracing.Tracer, zipkinTracer *stdzipkin.Tracer, logger log.Logger) sd.Factory { + return func(instance string) (endpoint.Endpoint, io.Closer, error) { + // We could just as easily use the HTTP or Thrift client package to make + // the connection to addsvc. We've chosen gRPC arbitrarily. Note that + // the transport is an implementation detail: it doesn't leak out of + // this function. Nice! + + conn, err := grpc.Dial(instance, grpc.WithInsecure()) + if err != nil { + return nil, nil, err + } + service := addtransport.NewGRPCClient(conn, tracer, zipkinTracer, logger) + endpoint := makeEndpoint(service) + + // Notice that the addsvc gRPC client converts the connection to a + // complete addsvc, and we just throw away everything except the method + // we're interested in. A smarter factory would mux multiple methods + // over the same connection. But that would require more work to manage + // the returned io.Closer, e.g. reference counting. Since this is for + // the purposes of demonstration, we'll just keep it simple. + + return endpoint, conn, nil + } +} + +func stringsvcFactory(ctx context.Context, method, path string) sd.Factory { + return func(instance string) (endpoint.Endpoint, io.Closer, error) { + if !strings.HasPrefix(instance, "http") { + instance = "http://" + instance + } + tgt, err := url.Parse(instance) + if err != nil { + return nil, nil, err + } + tgt.Path = path + + // Since stringsvc doesn't have any kind of package we can import, or + // any formal spec, we are forced to just assert where the endpoints + // live, and write our own code to encode and decode requests and + // responses. Ideally, if you write the service, you will want to + // provide stronger guarantees to your clients. 
+ + var ( + enc httptransport.EncodeRequestFunc + dec httptransport.DecodeResponseFunc + ) + switch path { + case "/uppercase": + enc, dec = encodeJSONRequest, decodeUppercaseResponse + case "/count": + enc, dec = encodeJSONRequest, decodeCountResponse + default: + return nil, nil, fmt.Errorf("unknown stringsvc path %q", path) + } + + return httptransport.NewClient(method, tgt, enc, dec).Endpoint(), nil, nil + } +} + +func encodeJSONRequest(_ context.Context, req *http.Request, request interface{}) error { + // Both uppercase and count requests are encoded in the same way: + // simple JSON serialization to the request body. + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(request); err != nil { + return err + } + req.Body = ioutil.NopCloser(&buf) + return nil +} + +func encodeJSONResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} + +// I've just copied these functions from stringsvc3/transport.go, inlining the +// struct definitions. + +func decodeUppercaseResponse(ctx context.Context, resp *http.Response) (interface{}, error) { + var response struct { + V string `json:"v"` + Err string `json:"err,omitempty"` + } + if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { + return nil, err + } + return response, nil +} + +func decodeCountResponse(ctx context.Context, resp *http.Response) (interface{}, error) { + var response struct { + V int `json:"v"` + } + if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { + return nil, err + } + return response, nil +} + +func decodeUppercaseRequest(ctx context.Context, req *http.Request) (interface{}, error) { + var request struct { + S string `json:"s"` + } + if err := json.NewDecoder(req.Body).Decode(&request); err != nil { + return nil, err + } + return request, nil +} + +func decodeCountRequest(ctx context.Context, req *http.Request) (interface{}, error) { + var request struct { + S string `json:"s"` + } + if err := json.NewDecoder(req.Body).Decode(&request); err != nil { + return nil, err + } + return request, nil +} diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/README.md b/vendor/github.com/go-kit/kit/examples/profilesvc/README.md new file mode 100644 index 000000000..68c41252f --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/profilesvc/README.md @@ -0,0 +1,4 @@ +# profilesvc + +This example demonstrates how to use Go kit to implement a REST-y HTTP service. +It leverages the excellent [gorilla mux package](https://github.com/gorilla/mux) for routing. diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/client/client.go b/vendor/github.com/go-kit/kit/examples/profilesvc/client/client.go new file mode 100644 index 000000000..03c1dc7a7 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/profilesvc/client/client.go @@ -0,0 +1,121 @@ +// Package client provides a profilesvc client based on a predefined Consul +// service name and relevant tags. Users must only provide the address of a +// Consul server. +package client + +import ( + "io" + "time" + + consulapi "github.com/hashicorp/consul/api" + + "github.com/go-kit/kit/endpoint" + "github.com/go-kit/kit/examples/profilesvc" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/sd" + "github.com/go-kit/kit/sd/consul" + "github.com/go-kit/kit/sd/lb" +) + +// New returns a service that's load-balanced over instances of profilesvc found +// in the provided Consul server. 
The mechanism of looking up profilesvc +// instances in Consul is hard-coded into the client. +func New(consulAddr string, logger log.Logger) (profilesvc.Service, error) { + apiclient, err := consulapi.NewClient(&consulapi.Config{ + Address: consulAddr, + }) + if err != nil { + return nil, err + } + + // As the implementer of profilesvc, we declare and enforce these + // parameters for all of the profilesvc consumers. + var ( + consulService = "profilesvc" + consulTags = []string{"prod"} + passingOnly = true + retryMax = 3 + retryTimeout = 500 * time.Millisecond + ) + + var ( + sdclient = consul.NewClient(apiclient) + instancer = consul.NewInstancer(sdclient, logger, consulService, consulTags, passingOnly) + endpoints profilesvc.Endpoints + ) + { + factory := factoryFor(profilesvc.MakePostProfileEndpoint) + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(retryMax, retryTimeout, balancer) + endpoints.PostProfileEndpoint = retry + } + { + factory := factoryFor(profilesvc.MakeGetProfileEndpoint) + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(retryMax, retryTimeout, balancer) + endpoints.GetProfileEndpoint = retry + } + { + factory := factoryFor(profilesvc.MakePutProfileEndpoint) + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(retryMax, retryTimeout, balancer) + endpoints.PutProfileEndpoint = retry + } + { + factory := factoryFor(profilesvc.MakePatchProfileEndpoint) + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(retryMax, retryTimeout, balancer) + endpoints.PatchProfileEndpoint = retry + } + { + factory := factoryFor(profilesvc.MakeDeleteProfileEndpoint) + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(retryMax, retryTimeout, balancer) + endpoints.DeleteProfileEndpoint = retry + } + { + factory := factoryFor(profilesvc.MakeGetAddressesEndpoint) + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(retryMax, retryTimeout, balancer) + endpoints.GetAddressesEndpoint = retry + } + { + factory := factoryFor(profilesvc.MakeGetAddressEndpoint) + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(retryMax, retryTimeout, balancer) + endpoints.GetAddressEndpoint = retry + } + { + factory := factoryFor(profilesvc.MakePostAddressEndpoint) + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(retryMax, retryTimeout, balancer) + endpoints.PostAddressEndpoint = retry + } + { + factory := factoryFor(profilesvc.MakeDeleteAddressEndpoint) + endpointer := sd.NewEndpointer(instancer, factory, logger) + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(retryMax, retryTimeout, balancer) + endpoints.DeleteAddressEndpoint = retry + } + + return endpoints, nil +} + +func factoryFor(makeEndpoint func(profilesvc.Service) endpoint.Endpoint) sd.Factory { + return func(instance string) (endpoint.Endpoint, io.Closer, error) { + service, err := profilesvc.MakeClientEndpoints(instance) + if err != nil { + return nil, nil, err + } + return makeEndpoint(service), nil, nil + } +} diff --git 
a/vendor/github.com/go-kit/kit/examples/profilesvc/cmd/profilesvc/main.go b/vendor/github.com/go-kit/kit/examples/profilesvc/cmd/profilesvc/main.go new file mode 100644 index 000000000..fda752952 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/profilesvc/cmd/profilesvc/main.go @@ -0,0 +1,52 @@ +package main + +import ( + "flag" + "fmt" + "net/http" + "os" + "os/signal" + "syscall" + + "github.com/go-kit/kit/examples/profilesvc" + "github.com/go-kit/kit/log" +) + +func main() { + var ( + httpAddr = flag.String("http.addr", ":8080", "HTTP listen address") + ) + flag.Parse() + + var logger log.Logger + { + logger = log.NewLogfmtLogger(os.Stderr) + logger = log.With(logger, "ts", log.DefaultTimestampUTC) + logger = log.With(logger, "caller", log.DefaultCaller) + } + + var s profilesvc.Service + { + s = profilesvc.NewInmemService() + s = profilesvc.LoggingMiddleware(logger)(s) + } + + var h http.Handler + { + h = profilesvc.MakeHTTPHandler(s, log.With(logger, "component", "HTTP")) + } + + errs := make(chan error) + go func() { + c := make(chan os.Signal) + signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) + errs <- fmt.Errorf("%s", <-c) + }() + + go func() { + logger.Log("transport", "HTTP", "addr", *httpAddr) + errs <- http.ListenAndServe(*httpAddr, h) + }() + + logger.Log("exit", <-errs) +} diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/endpoints.go b/vendor/github.com/go-kit/kit/examples/profilesvc/endpoints.go new file mode 100644 index 000000000..3da29b4aa --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/profilesvc/endpoints.go @@ -0,0 +1,387 @@ +package profilesvc + +import ( + "context" + "net/url" + "strings" + + "github.com/go-kit/kit/endpoint" + httptransport "github.com/go-kit/kit/transport/http" +) + +// Endpoints collects all of the endpoints that compose a profile service. It's +// meant to be used as a helper struct, to collect all of the endpoints into a +// single parameter. +// +// In a server, it's useful for functions that need to operate on a per-endpoint +// basis. For example, you might pass an Endpoints to a function that produces +// an http.Handler, with each method (endpoint) wired up to a specific path. (It +// is probably a mistake in design to invoke the Service methods on the +// Endpoints struct in a server.) +// +// In a client, it's useful to collect individually constructed endpoints into a +// single type that implements the Service interface. For example, you might +// construct individual endpoints using transport/http.NewClient, combine them +// into an Endpoints, and return it to the caller as a Service. +type Endpoints struct { + PostProfileEndpoint endpoint.Endpoint + GetProfileEndpoint endpoint.Endpoint + PutProfileEndpoint endpoint.Endpoint + PatchProfileEndpoint endpoint.Endpoint + DeleteProfileEndpoint endpoint.Endpoint + GetAddressesEndpoint endpoint.Endpoint + GetAddressEndpoint endpoint.Endpoint + PostAddressEndpoint endpoint.Endpoint + DeleteAddressEndpoint endpoint.Endpoint +} + +// MakeServerEndpoints returns an Endpoints struct where each endpoint invokes +// the corresponding method on the provided service. Useful in a profilesvc +// server. 
+func MakeServerEndpoints(s Service) Endpoints { + return Endpoints{ + PostProfileEndpoint: MakePostProfileEndpoint(s), + GetProfileEndpoint: MakeGetProfileEndpoint(s), + PutProfileEndpoint: MakePutProfileEndpoint(s), + PatchProfileEndpoint: MakePatchProfileEndpoint(s), + DeleteProfileEndpoint: MakeDeleteProfileEndpoint(s), + GetAddressesEndpoint: MakeGetAddressesEndpoint(s), + GetAddressEndpoint: MakeGetAddressEndpoint(s), + PostAddressEndpoint: MakePostAddressEndpoint(s), + DeleteAddressEndpoint: MakeDeleteAddressEndpoint(s), + } +} + +// MakeClientEndpoints returns an Endpoints struct where each endpoint invokes +// the corresponding method on the remote instance, via a transport/http.Client. +// Useful in a profilesvc client. +func MakeClientEndpoints(instance string) (Endpoints, error) { + if !strings.HasPrefix(instance, "http") { + instance = "http://" + instance + } + tgt, err := url.Parse(instance) + if err != nil { + return Endpoints{}, err + } + tgt.Path = "" + + options := []httptransport.ClientOption{} + + // Note that the request encoders need to modify the request URL, changing + // the path and method. That's fine: we simply need to provide specific + // encoders for each endpoint. + + return Endpoints{ + PostProfileEndpoint: httptransport.NewClient("POST", tgt, encodePostProfileRequest, decodePostProfileResponse, options...).Endpoint(), + GetProfileEndpoint: httptransport.NewClient("GET", tgt, encodeGetProfileRequest, decodeGetProfileResponse, options...).Endpoint(), + PutProfileEndpoint: httptransport.NewClient("PUT", tgt, encodePutProfileRequest, decodePutProfileResponse, options...).Endpoint(), + PatchProfileEndpoint: httptransport.NewClient("PATCH", tgt, encodePatchProfileRequest, decodePatchProfileResponse, options...).Endpoint(), + DeleteProfileEndpoint: httptransport.NewClient("DELETE", tgt, encodeDeleteProfileRequest, decodeDeleteProfileResponse, options...).Endpoint(), + GetAddressesEndpoint: httptransport.NewClient("GET", tgt, encodeGetAddressesRequest, decodeGetAddressesResponse, options...).Endpoint(), + GetAddressEndpoint: httptransport.NewClient("GET", tgt, encodeGetAddressRequest, decodeGetAddressResponse, options...).Endpoint(), + PostAddressEndpoint: httptransport.NewClient("POST", tgt, encodePostAddressRequest, decodePostAddressResponse, options...).Endpoint(), + DeleteAddressEndpoint: httptransport.NewClient("DELETE", tgt, encodeDeleteAddressRequest, decodeDeleteAddressResponse, options...).Endpoint(), + }, nil +} + +// PostProfile implements Service. Primarily useful in a client. +func (e Endpoints) PostProfile(ctx context.Context, p Profile) error { + request := postProfileRequest{Profile: p} + response, err := e.PostProfileEndpoint(ctx, request) + if err != nil { + return err + } + resp := response.(postProfileResponse) + return resp.Err +} + +// GetProfile implements Service. Primarily useful in a client. +func (e Endpoints) GetProfile(ctx context.Context, id string) (Profile, error) { + request := getProfileRequest{ID: id} + response, err := e.GetProfileEndpoint(ctx, request) + if err != nil { + return Profile{}, err + } + resp := response.(getProfileResponse) + return resp.Profile, resp.Err +} + +// PutProfile implements Service. Primarily useful in a client. 
+func (e Endpoints) PutProfile(ctx context.Context, id string, p Profile) error { + request := putProfileRequest{ID: id, Profile: p} + response, err := e.PutProfileEndpoint(ctx, request) + if err != nil { + return err + } + resp := response.(putProfileResponse) + return resp.Err +} + +// PatchProfile implements Service. Primarily useful in a client. +func (e Endpoints) PatchProfile(ctx context.Context, id string, p Profile) error { + request := patchProfileRequest{ID: id, Profile: p} + response, err := e.PatchProfileEndpoint(ctx, request) + if err != nil { + return err + } + resp := response.(patchProfileResponse) + return resp.Err +} + +// DeleteProfile implements Service. Primarily useful in a client. +func (e Endpoints) DeleteProfile(ctx context.Context, id string) error { + request := deleteProfileRequest{ID: id} + response, err := e.DeleteProfileEndpoint(ctx, request) + if err != nil { + return err + } + resp := response.(deleteProfileResponse) + return resp.Err +} + +// GetAddresses implements Service. Primarily useful in a client. +func (e Endpoints) GetAddresses(ctx context.Context, profileID string) ([]Address, error) { + request := getAddressesRequest{ProfileID: profileID} + response, err := e.GetAddressesEndpoint(ctx, request) + if err != nil { + return nil, err + } + resp := response.(getAddressesResponse) + return resp.Addresses, resp.Err +} + +// GetAddress implements Service. Primarily useful in a client. +func (e Endpoints) GetAddress(ctx context.Context, profileID string, addressID string) (Address, error) { + request := getAddressRequest{ProfileID: profileID, AddressID: addressID} + response, err := e.GetAddressEndpoint(ctx, request) + if err != nil { + return Address{}, err + } + resp := response.(getAddressResponse) + return resp.Address, resp.Err +} + +// PostAddress implements Service. Primarily useful in a client. +func (e Endpoints) PostAddress(ctx context.Context, profileID string, a Address) error { + request := postAddressRequest{ProfileID: profileID, Address: a} + response, err := e.PostAddressEndpoint(ctx, request) + if err != nil { + return err + } + resp := response.(postAddressResponse) + return resp.Err +} + +// DeleteAddress implements Service. Primarily useful in a client. +func (e Endpoints) DeleteAddress(ctx context.Context, profileID string, addressID string) error { + request := deleteAddressRequest{ProfileID: profileID, AddressID: addressID} + response, err := e.DeleteAddressEndpoint(ctx, request) + if err != nil { + return err + } + resp := response.(deleteAddressResponse) + return resp.Err +} + +// MakePostProfileEndpoint returns an endpoint via the passed service. +// Primarily useful in a server. +func MakePostProfileEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + req := request.(postProfileRequest) + e := s.PostProfile(ctx, req.Profile) + return postProfileResponse{Err: e}, nil + } +} + +// MakeGetProfileEndpoint returns an endpoint via the passed service. +// Primarily useful in a server. +func MakeGetProfileEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + req := request.(getProfileRequest) + p, e := s.GetProfile(ctx, req.ID) + return getProfileResponse{Profile: p, Err: e}, nil + } +} + +// MakePutProfileEndpoint returns an endpoint via the passed service. +// Primarily useful in a server. 
+func MakePutProfileEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + req := request.(putProfileRequest) + e := s.PutProfile(ctx, req.ID, req.Profile) + return putProfileResponse{Err: e}, nil + } +} + +// MakePatchProfileEndpoint returns an endpoint via the passed service. +// Primarily useful in a server. +func MakePatchProfileEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + req := request.(patchProfileRequest) + e := s.PatchProfile(ctx, req.ID, req.Profile) + return patchProfileResponse{Err: e}, nil + } +} + +// MakeDeleteProfileEndpoint returns an endpoint via the passed service. +// Primarily useful in a server. +func MakeDeleteProfileEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + req := request.(deleteProfileRequest) + e := s.DeleteProfile(ctx, req.ID) + return deleteProfileResponse{Err: e}, nil + } +} + +// MakeGetAddressesEndpoint returns an endpoint via the passed service. +// Primarily useful in a server. +func MakeGetAddressesEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + req := request.(getAddressesRequest) + a, e := s.GetAddresses(ctx, req.ProfileID) + return getAddressesResponse{Addresses: a, Err: e}, nil + } +} + +// MakeGetAddressEndpoint returns an endpoint via the passed service. +// Primarily useful in a server. +func MakeGetAddressEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + req := request.(getAddressRequest) + a, e := s.GetAddress(ctx, req.ProfileID, req.AddressID) + return getAddressResponse{Address: a, Err: e}, nil + } +} + +// MakePostAddressEndpoint returns an endpoint via the passed service. +// Primarily useful in a server. +func MakePostAddressEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + req := request.(postAddressRequest) + e := s.PostAddress(ctx, req.ProfileID, req.Address) + return postAddressResponse{Err: e}, nil + } +} + +// MakeDeleteAddressEndpoint returns an endpoint via the passed service. +// Primarily useful in a server. +func MakeDeleteAddressEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + req := request.(deleteAddressRequest) + e := s.DeleteAddress(ctx, req.ProfileID, req.AddressID) + return deleteAddressResponse{Err: e}, nil + } +} + +// We have two options to return errors from the business logic. +// +// We could return the error via the endpoint itself. That makes certain things +// a little bit easier, like providing non-200 HTTP responses to the client. But +// Go kit assumes that endpoint errors are (or may be treated as) +// transport-domain errors. For example, an endpoint error will count against a +// circuit breaker error count. +// +// Therefore, it's often better to return service (business logic) errors in the +// response object. This means we have to do a bit more work in the HTTP +// response encoder to detect e.g. a not-found error and provide a proper HTTP +// status code. That work is done with the errorer interface, in transport.go. +// Response types that may contain business-logic errors implement that +// interface. 
+ +type postProfileRequest struct { + Profile Profile +} + +type postProfileResponse struct { + Err error `json:"err,omitempty"` +} + +func (r postProfileResponse) error() error { return r.Err } + +type getProfileRequest struct { + ID string +} + +type getProfileResponse struct { + Profile Profile `json:"profile,omitempty"` + Err error `json:"err,omitempty"` +} + +func (r getProfileResponse) error() error { return r.Err } + +type putProfileRequest struct { + ID string + Profile Profile +} + +type putProfileResponse struct { + Err error `json:"err,omitempty"` +} + +func (r putProfileResponse) error() error { return nil } + +type patchProfileRequest struct { + ID string + Profile Profile +} + +type patchProfileResponse struct { + Err error `json:"err,omitempty"` +} + +func (r patchProfileResponse) error() error { return r.Err } + +type deleteProfileRequest struct { + ID string +} + +type deleteProfileResponse struct { + Err error `json:"err,omitempty"` +} + +func (r deleteProfileResponse) error() error { return r.Err } + +type getAddressesRequest struct { + ProfileID string +} + +type getAddressesResponse struct { + Addresses []Address `json:"addresses,omitempty"` + Err error `json:"err,omitempty"` +} + +func (r getAddressesResponse) error() error { return r.Err } + +type getAddressRequest struct { + ProfileID string + AddressID string +} + +type getAddressResponse struct { + Address Address `json:"address,omitempty"` + Err error `json:"err,omitempty"` +} + +func (r getAddressResponse) error() error { return r.Err } + +type postAddressRequest struct { + ProfileID string + Address Address +} + +type postAddressResponse struct { + Err error `json:"err,omitempty"` +} + +func (r postAddressResponse) error() error { return r.Err } + +type deleteAddressRequest struct { + ProfileID string + AddressID string +} + +type deleteAddressResponse struct { + Err error `json:"err,omitempty"` +} + +func (r deleteAddressResponse) error() error { return r.Err } diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/middlewares.go b/vendor/github.com/go-kit/kit/examples/profilesvc/middlewares.go new file mode 100644 index 000000000..1b738f5b5 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/profilesvc/middlewares.go @@ -0,0 +1,88 @@ +package profilesvc + +import ( + "context" + "time" + + "github.com/go-kit/kit/log" +) + +// Middleware describes a service (as opposed to endpoint) middleware. 
+type Middleware func(Service) Service + +func LoggingMiddleware(logger log.Logger) Middleware { + return func(next Service) Service { + return &loggingMiddleware{ + next: next, + logger: logger, + } + } +} + +type loggingMiddleware struct { + next Service + logger log.Logger +} + +func (mw loggingMiddleware) PostProfile(ctx context.Context, p Profile) (err error) { + defer func(begin time.Time) { + mw.logger.Log("method", "PostProfile", "id", p.ID, "took", time.Since(begin), "err", err) + }(time.Now()) + return mw.next.PostProfile(ctx, p) +} + +func (mw loggingMiddleware) GetProfile(ctx context.Context, id string) (p Profile, err error) { + defer func(begin time.Time) { + mw.logger.Log("method", "GetProfile", "id", id, "took", time.Since(begin), "err", err) + }(time.Now()) + return mw.next.GetProfile(ctx, id) +} + +func (mw loggingMiddleware) PutProfile(ctx context.Context, id string, p Profile) (err error) { + defer func(begin time.Time) { + mw.logger.Log("method", "PutProfile", "id", id, "took", time.Since(begin), "err", err) + }(time.Now()) + return mw.next.PutProfile(ctx, id, p) +} + +func (mw loggingMiddleware) PatchProfile(ctx context.Context, id string, p Profile) (err error) { + defer func(begin time.Time) { + mw.logger.Log("method", "PatchProfile", "id", id, "took", time.Since(begin), "err", err) + }(time.Now()) + return mw.next.PatchProfile(ctx, id, p) +} + +func (mw loggingMiddleware) DeleteProfile(ctx context.Context, id string) (err error) { + defer func(begin time.Time) { + mw.logger.Log("method", "DeleteProfile", "id", id, "took", time.Since(begin), "err", err) + }(time.Now()) + return mw.next.DeleteProfile(ctx, id) +} + +func (mw loggingMiddleware) GetAddresses(ctx context.Context, profileID string) (addresses []Address, err error) { + defer func(begin time.Time) { + mw.logger.Log("method", "GetAddresses", "profileID", profileID, "took", time.Since(begin), "err", err) + }(time.Now()) + return mw.next.GetAddresses(ctx, profileID) +} + +func (mw loggingMiddleware) GetAddress(ctx context.Context, profileID string, addressID string) (a Address, err error) { + defer func(begin time.Time) { + mw.logger.Log("method", "GetAddress", "profileID", profileID, "addressID", addressID, "took", time.Since(begin), "err", err) + }(time.Now()) + return mw.next.GetAddress(ctx, profileID, addressID) +} + +func (mw loggingMiddleware) PostAddress(ctx context.Context, profileID string, a Address) (err error) { + defer func(begin time.Time) { + mw.logger.Log("method", "PostAddress", "profileID", profileID, "took", time.Since(begin), "err", err) + }(time.Now()) + return mw.next.PostAddress(ctx, profileID, a) +} + +func (mw loggingMiddleware) DeleteAddress(ctx context.Context, profileID string, addressID string) (err error) { + defer func(begin time.Time) { + mw.logger.Log("method", "DeleteAddress", "profileID", profileID, "addressID", addressID, "took", time.Since(begin), "err", err) + }(time.Now()) + return mw.next.DeleteAddress(ctx, profileID, addressID) +} diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/service.go b/vendor/github.com/go-kit/kit/examples/profilesvc/service.go new file mode 100644 index 000000000..cd34ecd9e --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/profilesvc/service.go @@ -0,0 +1,185 @@ +package profilesvc + +import ( + "context" + "errors" + "sync" +) + +// Service is a simple CRUD interface for user profiles. 
+type Service interface { + PostProfile(ctx context.Context, p Profile) error + GetProfile(ctx context.Context, id string) (Profile, error) + PutProfile(ctx context.Context, id string, p Profile) error + PatchProfile(ctx context.Context, id string, p Profile) error + DeleteProfile(ctx context.Context, id string) error + GetAddresses(ctx context.Context, profileID string) ([]Address, error) + GetAddress(ctx context.Context, profileID string, addressID string) (Address, error) + PostAddress(ctx context.Context, profileID string, a Address) error + DeleteAddress(ctx context.Context, profileID string, addressID string) error +} + +// Profile represents a single user profile. +// ID should be globally unique. +type Profile struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Addresses []Address `json:"addresses,omitempty"` +} + +// Address is a field of a user profile. +// ID should be unique within the profile (at a minimum). +type Address struct { + ID string `json:"id"` + Location string `json:"location,omitempty"` +} + +var ( + ErrInconsistentIDs = errors.New("inconsistent IDs") + ErrAlreadyExists = errors.New("already exists") + ErrNotFound = errors.New("not found") +) + +type inmemService struct { + mtx sync.RWMutex + m map[string]Profile +} + +func NewInmemService() Service { + return &inmemService{ + m: map[string]Profile{}, + } +} + +func (s *inmemService) PostProfile(ctx context.Context, p Profile) error { + s.mtx.Lock() + defer s.mtx.Unlock() + if _, ok := s.m[p.ID]; ok { + return ErrAlreadyExists // POST = create, don't overwrite + } + s.m[p.ID] = p + return nil +} + +func (s *inmemService) GetProfile(ctx context.Context, id string) (Profile, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + p, ok := s.m[id] + if !ok { + return Profile{}, ErrNotFound + } + return p, nil +} + +func (s *inmemService) PutProfile(ctx context.Context, id string, p Profile) error { + if id != p.ID { + return ErrInconsistentIDs + } + s.mtx.Lock() + defer s.mtx.Unlock() + s.m[id] = p // PUT = create or update + return nil +} + +func (s *inmemService) PatchProfile(ctx context.Context, id string, p Profile) error { + if p.ID != "" && id != p.ID { + return ErrInconsistentIDs + } + + s.mtx.Lock() + defer s.mtx.Unlock() + + existing, ok := s.m[id] + if !ok { + return ErrNotFound // PATCH = update existing, don't create + } + + // We assume that it's not possible to PATCH the ID, and that it's not + // possible to PATCH any field to its zero value. That is, the zero value + // means not specified. The way around this is to use e.g. Name *string in + // the Profile definition. But since this is just a demonstrative example, + // I'm leaving that out. 
+ + if p.Name != "" { + existing.Name = p.Name + } + if len(p.Addresses) > 0 { + existing.Addresses = p.Addresses + } + s.m[id] = existing + return nil +} + +func (s *inmemService) DeleteProfile(ctx context.Context, id string) error { + s.mtx.Lock() + defer s.mtx.Unlock() + if _, ok := s.m[id]; !ok { + return ErrNotFound + } + delete(s.m, id) + return nil +} + +func (s *inmemService) GetAddresses(ctx context.Context, profileID string) ([]Address, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + p, ok := s.m[profileID] + if !ok { + return []Address{}, ErrNotFound + } + return p.Addresses, nil +} + +func (s *inmemService) GetAddress(ctx context.Context, profileID string, addressID string) (Address, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + p, ok := s.m[profileID] + if !ok { + return Address{}, ErrNotFound + } + for _, address := range p.Addresses { + if address.ID == addressID { + return address, nil + } + } + return Address{}, ErrNotFound +} + +func (s *inmemService) PostAddress(ctx context.Context, profileID string, a Address) error { + s.mtx.Lock() + defer s.mtx.Unlock() + p, ok := s.m[profileID] + if !ok { + return ErrNotFound + } + for _, address := range p.Addresses { + if address.ID == a.ID { + return ErrAlreadyExists + } + } + p.Addresses = append(p.Addresses, a) + s.m[profileID] = p + return nil +} + +func (s *inmemService) DeleteAddress(ctx context.Context, profileID string, addressID string) error { + s.mtx.Lock() + defer s.mtx.Unlock() + p, ok := s.m[profileID] + if !ok { + return ErrNotFound + } + newAddresses := make([]Address, 0, len(p.Addresses)) + for _, address := range p.Addresses { + if address.ID == addressID { + continue // delete + } + newAddresses = append(newAddresses, address) + } + if len(newAddresses) == len(p.Addresses) { + return ErrNotFound + } + p.Addresses = newAddresses + s.m[profileID] = p + return nil +} diff --git a/vendor/github.com/go-kit/kit/examples/profilesvc/transport.go b/vendor/github.com/go-kit/kit/examples/profilesvc/transport.go new file mode 100644 index 000000000..a0136ee4e --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/profilesvc/transport.go @@ -0,0 +1,400 @@ +package profilesvc + +// The profilesvc is just over HTTP, so we just have a single transport.go. + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/url" + + "github.com/gorilla/mux" + + "github.com/go-kit/kit/log" + httptransport "github.com/go-kit/kit/transport/http" +) + +var ( + // ErrBadRouting is returned when an expected path variable is missing. + // It always indicates programmer error. + ErrBadRouting = errors.New("inconsistent mapping between route and handler (programmer error)") +) + +// MakeHTTPHandler mounts all of the service endpoints into an http.Handler. +// Useful in a profilesvc server. 
+func MakeHTTPHandler(s Service, logger log.Logger) http.Handler { + r := mux.NewRouter() + e := MakeServerEndpoints(s) + options := []httptransport.ServerOption{ + httptransport.ServerErrorLogger(logger), + httptransport.ServerErrorEncoder(encodeError), + } + + // POST /profiles/ adds another profile + // GET /profiles/:id retrieves the given profile by id + // PUT /profiles/:id post updated profile information about the profile + // PATCH /profiles/:id partial updated profile information + // DELETE /profiles/:id remove the given profile + // GET /profiles/:id/addresses/ retrieve addresses associated with the profile + // GET /profiles/:id/addresses/:addressID retrieve a particular profile address + // POST /profiles/:id/addresses/ add a new address + // DELETE /profiles/:id/addresses/:addressID remove an address + + r.Methods("POST").Path("/profiles/").Handler(httptransport.NewServer( + e.PostProfileEndpoint, + decodePostProfileRequest, + encodeResponse, + options..., + )) + r.Methods("GET").Path("/profiles/{id}").Handler(httptransport.NewServer( + e.GetProfileEndpoint, + decodeGetProfileRequest, + encodeResponse, + options..., + )) + r.Methods("PUT").Path("/profiles/{id}").Handler(httptransport.NewServer( + e.PutProfileEndpoint, + decodePutProfileRequest, + encodeResponse, + options..., + )) + r.Methods("PATCH").Path("/profiles/{id}").Handler(httptransport.NewServer( + e.PatchProfileEndpoint, + decodePatchProfileRequest, + encodeResponse, + options..., + )) + r.Methods("DELETE").Path("/profiles/{id}").Handler(httptransport.NewServer( + e.DeleteProfileEndpoint, + decodeDeleteProfileRequest, + encodeResponse, + options..., + )) + r.Methods("GET").Path("/profiles/{id}/addresses/").Handler(httptransport.NewServer( + e.GetAddressesEndpoint, + decodeGetAddressesRequest, + encodeResponse, + options..., + )) + r.Methods("GET").Path("/profiles/{id}/addresses/{addressID}").Handler(httptransport.NewServer( + e.GetAddressEndpoint, + decodeGetAddressRequest, + encodeResponse, + options..., + )) + r.Methods("POST").Path("/profiles/{id}/addresses/").Handler(httptransport.NewServer( + e.PostAddressEndpoint, + decodePostAddressRequest, + encodeResponse, + options..., + )) + r.Methods("DELETE").Path("/profiles/{id}/addresses/{addressID}").Handler(httptransport.NewServer( + e.DeleteAddressEndpoint, + decodeDeleteAddressRequest, + encodeResponse, + options..., + )) + return r +} + +func decodePostProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { + var req postProfileRequest + if e := json.NewDecoder(r.Body).Decode(&req.Profile); e != nil { + return nil, e + } + return req, nil +} + +func decodeGetProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, ErrBadRouting + } + return getProfileRequest{ID: id}, nil +} + +func decodePutProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, ErrBadRouting + } + var profile Profile + if err := json.NewDecoder(r.Body).Decode(&profile); err != nil { + return nil, err + } + return putProfileRequest{ + ID: id, + Profile: profile, + }, nil +} + +func decodePatchProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, ErrBadRouting + } + var profile Profile + if err := json.NewDecoder(r.Body).Decode(&profile); err != nil { + return nil, err + 
} + return patchProfileRequest{ + ID: id, + Profile: profile, + }, nil +} + +func decodeDeleteProfileRequest(_ context.Context, r *http.Request) (request interface{}, err error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, ErrBadRouting + } + return deleteProfileRequest{ID: id}, nil +} + +func decodeGetAddressesRequest(_ context.Context, r *http.Request) (request interface{}, err error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, ErrBadRouting + } + return getAddressesRequest{ProfileID: id}, nil +} + +func decodeGetAddressRequest(_ context.Context, r *http.Request) (request interface{}, err error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, ErrBadRouting + } + addressID, ok := vars["addressID"] + if !ok { + return nil, ErrBadRouting + } + return getAddressRequest{ + ProfileID: id, + AddressID: addressID, + }, nil +} + +func decodePostAddressRequest(_ context.Context, r *http.Request) (request interface{}, err error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, ErrBadRouting + } + var address Address + if err := json.NewDecoder(r.Body).Decode(&address); err != nil { + return nil, err + } + return postAddressRequest{ + ProfileID: id, + Address: address, + }, nil +} + +func decodeDeleteAddressRequest(_ context.Context, r *http.Request) (request interface{}, err error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, ErrBadRouting + } + addressID, ok := vars["addressID"] + if !ok { + return nil, ErrBadRouting + } + return deleteAddressRequest{ + ProfileID: id, + AddressID: addressID, + }, nil +} + +func encodePostProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { + // r.Methods("POST").Path("/profiles/") + req.Method, req.URL.Path = "POST", "/profiles/" + return encodeRequest(ctx, req, request) +} + +func encodeGetProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { + // r.Methods("GET").Path("/profiles/{id}") + r := request.(getProfileRequest) + profileID := url.QueryEscape(r.ID) + req.Method, req.URL.Path = "GET", "/profiles/"+profileID + return encodeRequest(ctx, req, request) +} + +func encodePutProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { + // r.Methods("PUT").Path("/profiles/{id}") + r := request.(putProfileRequest) + profileID := url.QueryEscape(r.ID) + req.Method, req.URL.Path = "PUT", "/profiles/"+profileID + return encodeRequest(ctx, req, request) +} + +func encodePatchProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { + // r.Methods("PATCH").Path("/profiles/{id}") + r := request.(patchProfileRequest) + profileID := url.QueryEscape(r.ID) + req.Method, req.URL.Path = "PATCH", "/profiles/"+profileID + return encodeRequest(ctx, req, request) +} + +func encodeDeleteProfileRequest(ctx context.Context, req *http.Request, request interface{}) error { + // r.Methods("DELETE").Path("/profiles/{id}") + r := request.(deleteProfileRequest) + profileID := url.QueryEscape(r.ID) + req.Method, req.URL.Path = "DELETE", "/profiles/"+profileID + return encodeRequest(ctx, req, request) +} + +func encodeGetAddressesRequest(ctx context.Context, req *http.Request, request interface{}) error { + // r.Methods("GET").Path("/profiles/{id}/addresses/") + r := request.(getAddressesRequest) + profileID := url.QueryEscape(r.ProfileID) + req.Method, req.URL.Path = "GET", "/profiles/"+profileID+"/addresses/" + return encodeRequest(ctx, req, request) +} 
+ +func encodeGetAddressRequest(ctx context.Context, req *http.Request, request interface{}) error { + // r.Methods("GET").Path("/profiles/{id}/addresses/{addressID}") + r := request.(getAddressRequest) + profileID := url.QueryEscape(r.ProfileID) + addressID := url.QueryEscape(r.AddressID) + req.Method, req.URL.Path = "GET", "/profiles/"+profileID+"/addresses/"+addressID + return encodeRequest(ctx, req, request) +} + +func encodePostAddressRequest(ctx context.Context, req *http.Request, request interface{}) error { + // r.Methods("POST").Path("/profiles/{id}/addresses/") + r := request.(postAddressRequest) + profileID := url.QueryEscape(r.ProfileID) + req.Method, req.URL.Path = "POST", "/profiles/"+profileID+"/addresses/" + return encodeRequest(ctx, req, request) +} + +func encodeDeleteAddressRequest(ctx context.Context, req *http.Request, request interface{}) error { + // r.Methods("DELETE").Path("/profiles/{id}/addresses/{addressID}") + r := request.(deleteAddressRequest) + profileID := url.QueryEscape(r.ProfileID) + addressID := url.QueryEscape(r.AddressID) + req.Method, req.URL.Path = "DELETE", "/profiles/"+profileID+"/addresses/"+addressID + return encodeRequest(ctx, req, request) +} + +func decodePostProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { + var response postProfileResponse + err := json.NewDecoder(resp.Body).Decode(&response) + return response, err +} + +func decodeGetProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { + var response getProfileResponse + err := json.NewDecoder(resp.Body).Decode(&response) + return response, err +} + +func decodePutProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { + var response putProfileResponse + err := json.NewDecoder(resp.Body).Decode(&response) + return response, err +} + +func decodePatchProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { + var response patchProfileResponse + err := json.NewDecoder(resp.Body).Decode(&response) + return response, err +} + +func decodeDeleteProfileResponse(_ context.Context, resp *http.Response) (interface{}, error) { + var response deleteProfileResponse + err := json.NewDecoder(resp.Body).Decode(&response) + return response, err +} + +func decodeGetAddressesResponse(_ context.Context, resp *http.Response) (interface{}, error) { + var response getAddressesResponse + err := json.NewDecoder(resp.Body).Decode(&response) + return response, err +} + +func decodeGetAddressResponse(_ context.Context, resp *http.Response) (interface{}, error) { + var response getAddressResponse + err := json.NewDecoder(resp.Body).Decode(&response) + return response, err +} + +func decodePostAddressResponse(_ context.Context, resp *http.Response) (interface{}, error) { + var response postAddressResponse + err := json.NewDecoder(resp.Body).Decode(&response) + return response, err +} + +func decodeDeleteAddressResponse(_ context.Context, resp *http.Response) (interface{}, error) { + var response deleteAddressResponse + err := json.NewDecoder(resp.Body).Decode(&response) + return response, err +} + +// errorer is implemented by all concrete response types that may contain +// errors. It allows us to change the HTTP response code without needing to +// trigger an endpoint (transport-level) error. For more information, read the +// big comment in endpoints.go. +type errorer interface { + error() error +} + +// encodeResponse is the common method to encode all response types to the +// client. 
I chose to do it this way because, since we're using JSON, there's no +// reason to provide anything more specific. It's certainly possible to +// specialize on a per-response (per-method) basis. +func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error { + if e, ok := response.(errorer); ok && e.error() != nil { + // Not a Go kit transport error, but a business-logic error. + // Provide those as HTTP errors. + encodeError(ctx, e.error(), w) + return nil + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} + +// encodeRequest likewise JSON-encodes the request to the HTTP request body. +// Don't use it directly as a transport/http.Client EncodeRequestFunc: +// profilesvc endpoints require mutating the HTTP method and request path. +func encodeRequest(_ context.Context, req *http.Request, request interface{}) error { + var buf bytes.Buffer + err := json.NewEncoder(&buf).Encode(request) + if err != nil { + return err + } + req.Body = ioutil.NopCloser(&buf) + return nil +} + +func encodeError(_ context.Context, err error, w http.ResponseWriter) { + if err == nil { + panic("encodeError with nil error") + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(codeFrom(err)) + json.NewEncoder(w).Encode(map[string]interface{}{ + "error": err.Error(), + }) +} + +func codeFrom(err error) int { + switch err { + case ErrNotFound: + return http.StatusNotFound + case ErrAlreadyExists, ErrInconsistentIDs: + return http.StatusBadRequest + default: + return http.StatusInternalServerError + } +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/README.md b/vendor/github.com/go-kit/kit/examples/shipping/README.md new file mode 100644 index 000000000..1a9a14eea --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/README.md @@ -0,0 +1,25 @@ +# shipping + +This example demonstrates a more real-world application consisting of multiple services. + +## Description + +The implementation is based on the container shipping domain from the [Domain Driven Design](http://www.amazon.com/Domain-Driven-Design-Tackling-Complexity-Software/dp/0321125215) book by Eric Evans, which was [originally](http://dddsample.sourceforge.net/) implemented in Java but has since been ported to Go. This example is a somewhat stripped down version to demonstrate the use of Go kit. The [original Go application](https://github.com/marcusolsson/goddd) is maintained separately and accompanied by an [AngularJS application](https://github.com/marcusolsson/dddelivery-angularjs) as well as a mock [routing service](https://github.com/marcusolsson/pathfinder). + +### Organization + +The application consists of three application services, `booking`, `handling` and `tracking`. Each of these is an individual Go kit service as seen in previous examples. + +- __booking__ - used by the shipping company to book and route cargos. +- __handling__ - used by our staff around the world to register whenever the cargo has been received, loaded etc. +- __tracking__ - used by the customer to track the cargo along the route + +There are also a few pure domain packages that contain some intricate business-logic. They provide domain objects and services that are used by each application service to provide interesting use-cases for the user. + +`inmem` contains in-memory implementations for the repositories found in the domain packages. 
+ +The `routing` package provides a _domain service_ that is used to query an external application for possible routes. + +## Contributing + +As with all Go kit examples you are more than welcome to contribute. If you do however, please consider contributing back to the original project as well. diff --git a/vendor/github.com/go-kit/kit/examples/shipping/booking/endpoint.go b/vendor/github.com/go-kit/kit/examples/shipping/booking/endpoint.go new file mode 100644 index 000000000..abe83f751 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/booking/endpoint.go @@ -0,0 +1,139 @@ +package booking + +import ( + "context" + "time" + + "github.com/go-kit/kit/endpoint" + + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/location" +) + +type bookCargoRequest struct { + Origin location.UNLocode + Destination location.UNLocode + ArrivalDeadline time.Time +} + +type bookCargoResponse struct { + ID cargo.TrackingID `json:"tracking_id,omitempty"` + Err error `json:"error,omitempty"` +} + +func (r bookCargoResponse) error() error { return r.Err } + +func makeBookCargoEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(bookCargoRequest) + id, err := s.BookNewCargo(req.Origin, req.Destination, req.ArrivalDeadline) + return bookCargoResponse{ID: id, Err: err}, nil + } +} + +type loadCargoRequest struct { + ID cargo.TrackingID +} + +type loadCargoResponse struct { + Cargo *Cargo `json:"cargo,omitempty"` + Err error `json:"error,omitempty"` +} + +func (r loadCargoResponse) error() error { return r.Err } + +func makeLoadCargoEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(loadCargoRequest) + c, err := s.LoadCargo(req.ID) + return loadCargoResponse{Cargo: &c, Err: err}, nil + } +} + +type requestRoutesRequest struct { + ID cargo.TrackingID +} + +type requestRoutesResponse struct { + Routes []cargo.Itinerary `json:"routes,omitempty"` + Err error `json:"error,omitempty"` +} + +func (r requestRoutesResponse) error() error { return r.Err } + +func makeRequestRoutesEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(requestRoutesRequest) + itin := s.RequestPossibleRoutesForCargo(req.ID) + return requestRoutesResponse{Routes: itin, Err: nil}, nil + } +} + +type assignToRouteRequest struct { + ID cargo.TrackingID + Itinerary cargo.Itinerary +} + +type assignToRouteResponse struct { + Err error `json:"error,omitempty"` +} + +func (r assignToRouteResponse) error() error { return r.Err } + +func makeAssignToRouteEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(assignToRouteRequest) + err := s.AssignCargoToRoute(req.ID, req.Itinerary) + return assignToRouteResponse{Err: err}, nil + } +} + +type changeDestinationRequest struct { + ID cargo.TrackingID + Destination location.UNLocode +} + +type changeDestinationResponse struct { + Err error `json:"error,omitempty"` +} + +func (r changeDestinationResponse) error() error { return r.Err } + +func makeChangeDestinationEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(changeDestinationRequest) + err := s.ChangeDestination(req.ID, req.Destination) + return changeDestinationResponse{Err: err}, nil 
+ } +} + +type listCargosRequest struct{} + +type listCargosResponse struct { + Cargos []Cargo `json:"cargos,omitempty"` + Err error `json:"error,omitempty"` +} + +func (r listCargosResponse) error() error { return r.Err } + +func makeListCargosEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + _ = request.(listCargosRequest) + return listCargosResponse{Cargos: s.Cargos(), Err: nil}, nil + } +} + +type listLocationsRequest struct { +} + +type listLocationsResponse struct { + Locations []Location `json:"locations,omitempty"` + Err error `json:"error,omitempty"` +} + +func makeListLocationsEndpoint(s Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + _ = request.(listLocationsRequest) + return listLocationsResponse{Locations: s.Locations(), Err: nil}, nil + } +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/booking/instrumenting.go b/vendor/github.com/go-kit/kit/examples/shipping/booking/instrumenting.go new file mode 100644 index 000000000..b9b03b91c --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/booking/instrumenting.go @@ -0,0 +1,88 @@ +package booking + +import ( + "time" + + "github.com/go-kit/kit/metrics" + + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/location" +) + +type instrumentingService struct { + requestCount metrics.Counter + requestLatency metrics.Histogram + Service +} + +// NewInstrumentingService returns an instance of an instrumenting Service. +func NewInstrumentingService(counter metrics.Counter, latency metrics.Histogram, s Service) Service { + return &instrumentingService{ + requestCount: counter, + requestLatency: latency, + Service: s, + } +} + +func (s *instrumentingService) BookNewCargo(origin, destination location.UNLocode, deadline time.Time) (cargo.TrackingID, error) { + defer func(begin time.Time) { + s.requestCount.With("method", "book").Add(1) + s.requestLatency.With("method", "book").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return s.Service.BookNewCargo(origin, destination, deadline) +} + +func (s *instrumentingService) LoadCargo(id cargo.TrackingID) (c Cargo, err error) { + defer func(begin time.Time) { + s.requestCount.With("method", "load").Add(1) + s.requestLatency.With("method", "load").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return s.Service.LoadCargo(id) +} + +func (s *instrumentingService) RequestPossibleRoutesForCargo(id cargo.TrackingID) []cargo.Itinerary { + defer func(begin time.Time) { + s.requestCount.With("method", "request_routes").Add(1) + s.requestLatency.With("method", "request_routes").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return s.Service.RequestPossibleRoutesForCargo(id) +} + +func (s *instrumentingService) AssignCargoToRoute(id cargo.TrackingID, itinerary cargo.Itinerary) (err error) { + defer func(begin time.Time) { + s.requestCount.With("method", "assign_to_route").Add(1) + s.requestLatency.With("method", "assign_to_route").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return s.Service.AssignCargoToRoute(id, itinerary) +} + +func (s *instrumentingService) ChangeDestination(id cargo.TrackingID, l location.UNLocode) (err error) { + defer func(begin time.Time) { + s.requestCount.With("method", "change_destination").Add(1) + s.requestLatency.With("method", "change_destination").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return 
s.Service.ChangeDestination(id, l) +} + +func (s *instrumentingService) Cargos() []Cargo { + defer func(begin time.Time) { + s.requestCount.With("method", "list_cargos").Add(1) + s.requestLatency.With("method", "list_cargos").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return s.Service.Cargos() +} + +func (s *instrumentingService) Locations() []Location { + defer func(begin time.Time) { + s.requestCount.With("method", "list_locations").Add(1) + s.requestLatency.With("method", "list_locations").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return s.Service.Locations() +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/booking/logging.go b/vendor/github.com/go-kit/kit/examples/shipping/booking/logging.go new file mode 100644 index 000000000..931d43073 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/booking/logging.go @@ -0,0 +1,102 @@ +package booking + +import ( + "time" + + "github.com/go-kit/kit/log" + + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/location" +) + +type loggingService struct { + logger log.Logger + Service +} + +// NewLoggingService returns a new instance of a logging Service. +func NewLoggingService(logger log.Logger, s Service) Service { + return &loggingService{logger, s} +} + +func (s *loggingService) BookNewCargo(origin location.UNLocode, destination location.UNLocode, deadline time.Time) (id cargo.TrackingID, err error) { + defer func(begin time.Time) { + s.logger.Log( + "method", "book", + "origin", origin, + "destination", destination, + "arrival_deadline", deadline, + "took", time.Since(begin), + "err", err, + ) + }(time.Now()) + return s.Service.BookNewCargo(origin, destination, deadline) +} + +func (s *loggingService) LoadCargo(id cargo.TrackingID) (c Cargo, err error) { + defer func(begin time.Time) { + s.logger.Log( + "method", "load", + "tracking_id", id, + "took", time.Since(begin), + "err", err, + ) + }(time.Now()) + return s.Service.LoadCargo(id) +} + +func (s *loggingService) RequestPossibleRoutesForCargo(id cargo.TrackingID) []cargo.Itinerary { + defer func(begin time.Time) { + s.logger.Log( + "method", "request_routes", + "tracking_id", id, + "took", time.Since(begin), + ) + }(time.Now()) + return s.Service.RequestPossibleRoutesForCargo(id) +} + +func (s *loggingService) AssignCargoToRoute(id cargo.TrackingID, itinerary cargo.Itinerary) (err error) { + defer func(begin time.Time) { + s.logger.Log( + "method", "assign_to_route", + "tracking_id", id, + "took", time.Since(begin), + "err", err, + ) + }(time.Now()) + return s.Service.AssignCargoToRoute(id, itinerary) +} + +func (s *loggingService) ChangeDestination(id cargo.TrackingID, l location.UNLocode) (err error) { + defer func(begin time.Time) { + s.logger.Log( + "method", "change_destination", + "tracking_id", id, + "destination", l, + "took", time.Since(begin), + "err", err, + ) + }(time.Now()) + return s.Service.ChangeDestination(id, l) +} + +func (s *loggingService) Cargos() []Cargo { + defer func(begin time.Time) { + s.logger.Log( + "method", "list_cargos", + "took", time.Since(begin), + ) + }(time.Now()) + return s.Service.Cargos() +} + +func (s *loggingService) Locations() []Location { + defer func(begin time.Time) { + s.logger.Log( + "method", "list_locations", + "took", time.Since(begin), + ) + }(time.Now()) + return s.Service.Locations() +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/booking/service.go b/vendor/github.com/go-kit/kit/examples/shipping/booking/service.go new file 
mode 100644 index 000000000..8689a5a51 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/booking/service.go @@ -0,0 +1,197 @@ +// Package booking provides the use-case of booking a cargo. Used by views +// facing an administrator. +package booking + +import ( + "errors" + "time" + + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/location" + "github.com/go-kit/kit/examples/shipping/routing" +) + +// ErrInvalidArgument is returned when one or more arguments are invalid. +var ErrInvalidArgument = errors.New("invalid argument") + +// Service is the interface that provides booking methods. +type Service interface { + // BookNewCargo registers a new cargo in the tracking system, not yet + // routed. + BookNewCargo(origin location.UNLocode, destination location.UNLocode, deadline time.Time) (cargo.TrackingID, error) + + // LoadCargo returns a read model of a cargo. + LoadCargo(id cargo.TrackingID) (Cargo, error) + + // RequestPossibleRoutesForCargo requests a list of itineraries describing + // possible routes for this cargo. + RequestPossibleRoutesForCargo(id cargo.TrackingID) []cargo.Itinerary + + // AssignCargoToRoute assigns a cargo to the route specified by the + // itinerary. + AssignCargoToRoute(id cargo.TrackingID, itinerary cargo.Itinerary) error + + // ChangeDestination changes the destination of a cargo. + ChangeDestination(id cargo.TrackingID, destination location.UNLocode) error + + // Cargos returns a list of all cargos that have been booked. + Cargos() []Cargo + + // Locations returns a list of registered locations. + Locations() []Location +} + +type service struct { + cargos cargo.Repository + locations location.Repository + handlingEvents cargo.HandlingEventRepository + routingService routing.Service +} + +func (s *service) AssignCargoToRoute(id cargo.TrackingID, itinerary cargo.Itinerary) error { + if id == "" || len(itinerary.Legs) == 0 { + return ErrInvalidArgument + } + + c, err := s.cargos.Find(id) + if err != nil { + return err + } + + c.AssignToRoute(itinerary) + + return s.cargos.Store(c) +} + +func (s *service) BookNewCargo(origin, destination location.UNLocode, deadline time.Time) (cargo.TrackingID, error) { + if origin == "" || destination == "" || deadline.IsZero() { + return "", ErrInvalidArgument + } + + id := cargo.NextTrackingID() + rs := cargo.RouteSpecification{ + Origin: origin, + Destination: destination, + ArrivalDeadline: deadline, + } + + c := cargo.New(id, rs) + + if err := s.cargos.Store(c); err != nil { + return "", err + } + + return c.TrackingID, nil +} + +func (s *service) LoadCargo(id cargo.TrackingID) (Cargo, error) { + if id == "" { + return Cargo{}, ErrInvalidArgument + } + + c, err := s.cargos.Find(id) + if err != nil { + return Cargo{}, err + } + + return assemble(c, s.handlingEvents), nil +} + +func (s *service) ChangeDestination(id cargo.TrackingID, destination location.UNLocode) error { + if id == "" || destination == "" { + return ErrInvalidArgument + } + + c, err := s.cargos.Find(id) + if err != nil { + return err + } + + l, err := s.locations.Find(destination) + if err != nil { + return err + } + + c.SpecifyNewRoute(cargo.RouteSpecification{ + Origin: c.Origin, + Destination: l.UNLocode, + ArrivalDeadline: c.RouteSpecification.ArrivalDeadline, + }) + + if err := s.cargos.Store(c); err != nil { + return err + } + + return nil +} + +func (s *service) RequestPossibleRoutesForCargo(id cargo.TrackingID) []cargo.Itinerary { + if id == "" { + return nil + } + + c, err := 
s.cargos.Find(id) + if err != nil { + return []cargo.Itinerary{} + } + + return s.routingService.FetchRoutesForSpecification(c.RouteSpecification) +} + +func (s *service) Cargos() []Cargo { + var result []Cargo + for _, c := range s.cargos.FindAll() { + result = append(result, assemble(c, s.handlingEvents)) + } + return result +} + +func (s *service) Locations() []Location { + var result []Location + for _, v := range s.locations.FindAll() { + result = append(result, Location{ + UNLocode: string(v.UNLocode), + Name: v.Name, + }) + } + return result +} + +// NewService creates a booking service with necessary dependencies. +func NewService(cargos cargo.Repository, locations location.Repository, events cargo.HandlingEventRepository, rs routing.Service) Service { + return &service{ + cargos: cargos, + locations: locations, + handlingEvents: events, + routingService: rs, + } +} + +// Location is a read model for booking views. +type Location struct { + UNLocode string `json:"locode"` + Name string `json:"name"` +} + +// Cargo is a read model for booking views. +type Cargo struct { + ArrivalDeadline time.Time `json:"arrival_deadline"` + Destination string `json:"destination"` + Legs []cargo.Leg `json:"legs,omitempty"` + Misrouted bool `json:"misrouted"` + Origin string `json:"origin"` + Routed bool `json:"routed"` + TrackingID string `json:"tracking_id"` +} + +func assemble(c *cargo.Cargo, events cargo.HandlingEventRepository) Cargo { + return Cargo{ + TrackingID: string(c.TrackingID), + Origin: string(c.Origin), + Destination: string(c.RouteSpecification.Destination), + Misrouted: c.Delivery.RoutingStatus == cargo.Misrouted, + Routed: !c.Itinerary.IsEmpty(), + ArrivalDeadline: c.RouteSpecification.ArrivalDeadline, + Legs: c.Itinerary.Legs, + } +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/booking/transport.go b/vendor/github.com/go-kit/kit/examples/shipping/booking/transport.go new file mode 100644 index 000000000..8cf11a26c --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/booking/transport.go @@ -0,0 +1,194 @@ +package booking + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "time" + + "github.com/gorilla/mux" + + kitlog "github.com/go-kit/kit/log" + kithttp "github.com/go-kit/kit/transport/http" + + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/location" +) + +// MakeHandler returns a handler for the booking service. 
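As a client-side illustration of the route table below: booking a cargo is a `POST` to `/booking/v1/cargos` whose JSON body matches `decodeBookCargoRequest`. This is only a sketch; it assumes the example's default `:8080` listen address and uses two of the sample UN locodes.

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        // Request body shape expected by decodeBookCargoRequest.
        body, _ := json.Marshal(map[string]interface{}{
            "origin":           "SESTO",
            "destination":      "AUMEL",
            "arrival_deadline": time.Now().AddDate(0, 0, 14),
        })

        resp, err := http.Post("http://localhost:8080/booking/v1/cargos", "application/json", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        // On success the response carries the generated tracking ID
        // (see bookCargoResponse); on failure, an "error" string.
        var out struct {
            TrackingID string `json:"tracking_id"`
            Err        string `json:"error"`
        }
        _ = json.NewDecoder(resp.Body).Decode(&out)
        fmt.Println(out.TrackingID, out.Err)
    }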
+func MakeHandler(bs Service, logger kitlog.Logger) http.Handler { + opts := []kithttp.ServerOption{ + kithttp.ServerErrorLogger(logger), + kithttp.ServerErrorEncoder(encodeError), + } + + bookCargoHandler := kithttp.NewServer( + makeBookCargoEndpoint(bs), + decodeBookCargoRequest, + encodeResponse, + opts..., + ) + loadCargoHandler := kithttp.NewServer( + makeLoadCargoEndpoint(bs), + decodeLoadCargoRequest, + encodeResponse, + opts..., + ) + requestRoutesHandler := kithttp.NewServer( + makeRequestRoutesEndpoint(bs), + decodeRequestRoutesRequest, + encodeResponse, + opts..., + ) + assignToRouteHandler := kithttp.NewServer( + makeAssignToRouteEndpoint(bs), + decodeAssignToRouteRequest, + encodeResponse, + opts..., + ) + changeDestinationHandler := kithttp.NewServer( + makeChangeDestinationEndpoint(bs), + decodeChangeDestinationRequest, + encodeResponse, + opts..., + ) + listCargosHandler := kithttp.NewServer( + makeListCargosEndpoint(bs), + decodeListCargosRequest, + encodeResponse, + opts..., + ) + listLocationsHandler := kithttp.NewServer( + makeListLocationsEndpoint(bs), + decodeListLocationsRequest, + encodeResponse, + opts..., + ) + + r := mux.NewRouter() + + r.Handle("/booking/v1/cargos", bookCargoHandler).Methods("POST") + r.Handle("/booking/v1/cargos", listCargosHandler).Methods("GET") + r.Handle("/booking/v1/cargos/{id}", loadCargoHandler).Methods("GET") + r.Handle("/booking/v1/cargos/{id}/request_routes", requestRoutesHandler).Methods("GET") + r.Handle("/booking/v1/cargos/{id}/assign_to_route", assignToRouteHandler).Methods("POST") + r.Handle("/booking/v1/cargos/{id}/change_destination", changeDestinationHandler).Methods("POST") + r.Handle("/booking/v1/locations", listLocationsHandler).Methods("GET") + + return r +} + +var errBadRoute = errors.New("bad route") + +func decodeBookCargoRequest(_ context.Context, r *http.Request) (interface{}, error) { + var body struct { + Origin string `json:"origin"` + Destination string `json:"destination"` + ArrivalDeadline time.Time `json:"arrival_deadline"` + } + + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + return nil, err + } + + return bookCargoRequest{ + Origin: location.UNLocode(body.Origin), + Destination: location.UNLocode(body.Destination), + ArrivalDeadline: body.ArrivalDeadline, + }, nil +} + +func decodeLoadCargoRequest(_ context.Context, r *http.Request) (interface{}, error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, errBadRoute + } + return loadCargoRequest{ID: cargo.TrackingID(id)}, nil +} + +func decodeRequestRoutesRequest(_ context.Context, r *http.Request) (interface{}, error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, errBadRoute + } + return requestRoutesRequest{ID: cargo.TrackingID(id)}, nil +} + +func decodeAssignToRouteRequest(_ context.Context, r *http.Request) (interface{}, error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, errBadRoute + } + + var itinerary cargo.Itinerary + if err := json.NewDecoder(r.Body).Decode(&itinerary); err != nil { + return nil, err + } + + return assignToRouteRequest{ + ID: cargo.TrackingID(id), + Itinerary: itinerary, + }, nil +} + +func decodeChangeDestinationRequest(_ context.Context, r *http.Request) (interface{}, error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, errBadRoute + } + + var body struct { + Destination string `json:"destination"` + } + + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + return nil, err + } + + return 
changeDestinationRequest{ + ID: cargo.TrackingID(id), + Destination: location.UNLocode(body.Destination), + }, nil +} + +func decodeListCargosRequest(_ context.Context, r *http.Request) (interface{}, error) { + return listCargosRequest{}, nil +} + +func decodeListLocationsRequest(_ context.Context, r *http.Request) (interface{}, error) { + return listLocationsRequest{}, nil +} + +func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error { + if e, ok := response.(errorer); ok && e.error() != nil { + encodeError(ctx, e.error(), w) + return nil + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} + +type errorer interface { + error() error +} + +// encode errors from business-logic +func encodeError(_ context.Context, err error, w http.ResponseWriter) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + switch err { + case cargo.ErrUnknown: + w.WriteHeader(http.StatusNotFound) + case ErrInvalidArgument: + w.WriteHeader(http.StatusBadRequest) + default: + w.WriteHeader(http.StatusInternalServerError) + } + json.NewEncoder(w).Encode(map[string]interface{}{ + "error": err.Error(), + }) +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/cargo/cargo.go b/vendor/github.com/go-kit/kit/examples/shipping/cargo/cargo.go new file mode 100644 index 000000000..a9440f516 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/cargo/cargo.go @@ -0,0 +1,137 @@ +// Package cargo contains the heart of the domain model. +package cargo + +import ( + "errors" + "strings" + "time" + + "github.com/pborman/uuid" + + "github.com/go-kit/kit/examples/shipping/location" +) + +// TrackingID uniquely identifies a particular cargo. +type TrackingID string + +// Cargo is the central class in the domain model. +type Cargo struct { + TrackingID TrackingID + Origin location.UNLocode + RouteSpecification RouteSpecification + Itinerary Itinerary + Delivery Delivery +} + +// SpecifyNewRoute specifies a new route for this cargo. +func (c *Cargo) SpecifyNewRoute(rs RouteSpecification) { + c.RouteSpecification = rs + c.Delivery = c.Delivery.UpdateOnRouting(c.RouteSpecification, c.Itinerary) +} + +// AssignToRoute attaches a new itinerary to this cargo. +func (c *Cargo) AssignToRoute(itinerary Itinerary) { + c.Itinerary = itinerary + c.Delivery = c.Delivery.UpdateOnRouting(c.RouteSpecification, c.Itinerary) +} + +// DeriveDeliveryProgress updates all aspects of the cargo aggregate status +// based on the current route specification, itinerary and handling of the cargo. +func (c *Cargo) DeriveDeliveryProgress(history HandlingHistory) { + c.Delivery = DeriveDeliveryFrom(c.RouteSpecification, c.Itinerary, history) +} + +// New creates a new, unrouted cargo. +func New(id TrackingID, rs RouteSpecification) *Cargo { + itinerary := Itinerary{} + history := HandlingHistory{make([]HandlingEvent, 0)} + + return &Cargo{ + TrackingID: id, + Origin: rs.Origin, + RouteSpecification: rs, + Delivery: DeriveDeliveryFrom(rs, itinerary, history), + } +} + +// Repository provides access a cargo store. +type Repository interface { + Store(cargo *Cargo) error + Find(id TrackingID) (*Cargo, error) + FindAll() []*Cargo +} + +// ErrUnknown is used when a cargo could not be found. +var ErrUnknown = errors.New("unknown cargo") + +// NextTrackingID generates a new tracking ID. +// TODO: Move to infrastructure(?) 
+func NextTrackingID() TrackingID { + return TrackingID(strings.Split(strings.ToUpper(uuid.New()), "-")[0]) +} + +// RouteSpecification Contains information about a route: its origin, +// destination and arrival deadline. +type RouteSpecification struct { + Origin location.UNLocode + Destination location.UNLocode + ArrivalDeadline time.Time +} + +// IsSatisfiedBy checks whether provided itinerary satisfies this +// specification. +func (s RouteSpecification) IsSatisfiedBy(itinerary Itinerary) bool { + return itinerary.Legs != nil && + s.Origin == itinerary.InitialDepartureLocation() && + s.Destination == itinerary.FinalArrivalLocation() +} + +// RoutingStatus describes status of cargo routing. +type RoutingStatus int + +// Valid routing statuses. +const ( + NotRouted RoutingStatus = iota + Misrouted + Routed +) + +func (s RoutingStatus) String() string { + switch s { + case NotRouted: + return "Not routed" + case Misrouted: + return "Misrouted" + case Routed: + return "Routed" + } + return "" +} + +// TransportStatus describes status of cargo transportation. +type TransportStatus int + +// Valid transport statuses. +const ( + NotReceived TransportStatus = iota + InPort + OnboardCarrier + Claimed + Unknown +) + +func (s TransportStatus) String() string { + switch s { + case NotReceived: + return "Not received" + case InPort: + return "In port" + case OnboardCarrier: + return "Onboard carrier" + case Claimed: + return "Claimed" + case Unknown: + return "Unknown" + } + return "" +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/cargo/delivery.go b/vendor/github.com/go-kit/kit/examples/shipping/cargo/delivery.go new file mode 100644 index 000000000..34f079dcc --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/cargo/delivery.go @@ -0,0 +1,174 @@ +package cargo + +import ( + "time" + + "github.com/go-kit/kit/examples/shipping/location" + "github.com/go-kit/kit/examples/shipping/voyage" +) + +// Delivery is the actual transportation of the cargo, as opposed to the +// customer requirement (RouteSpecification) and the plan (Itinerary). +type Delivery struct { + Itinerary Itinerary + RouteSpecification RouteSpecification + RoutingStatus RoutingStatus + TransportStatus TransportStatus + NextExpectedActivity HandlingActivity + LastEvent HandlingEvent + LastKnownLocation location.UNLocode + CurrentVoyage voyage.Number + ETA time.Time + IsMisdirected bool + IsUnloadedAtDestination bool +} + +// UpdateOnRouting creates a new delivery snapshot to reflect changes in +// routing, i.e. when the route specification or the itinerary has changed but +// no additional handling of the cargo has been performed. +func (d Delivery) UpdateOnRouting(rs RouteSpecification, itinerary Itinerary) Delivery { + return newDelivery(d.LastEvent, itinerary, rs) +} + +// IsOnTrack checks if the delivery is on track. +func (d Delivery) IsOnTrack() bool { + return d.RoutingStatus == Routed && !d.IsMisdirected +} + +// DeriveDeliveryFrom creates a new delivery snapshot based on the complete +// handling history of a cargo, as well as its route specification and +// itinerary. +func DeriveDeliveryFrom(rs RouteSpecification, itinerary Itinerary, history HandlingHistory) Delivery { + lastEvent, _ := history.MostRecentlyCompletedEvent() + return newDelivery(lastEvent, itinerary, rs) +} + +// newDelivery creates a up-to-date delivery based on an handling event, +// itinerary and a route specification. 
+func newDelivery(lastEvent HandlingEvent, itinerary Itinerary, rs RouteSpecification) Delivery { + var ( + routingStatus = calculateRoutingStatus(itinerary, rs) + transportStatus = calculateTransportStatus(lastEvent) + lastKnownLocation = calculateLastKnownLocation(lastEvent) + isMisdirected = calculateMisdirectedStatus(lastEvent, itinerary) + isUnloadedAtDestination = calculateUnloadedAtDestination(lastEvent, rs) + currentVoyage = calculateCurrentVoyage(transportStatus, lastEvent) + ) + + d := Delivery{ + LastEvent: lastEvent, + Itinerary: itinerary, + RouteSpecification: rs, + RoutingStatus: routingStatus, + TransportStatus: transportStatus, + LastKnownLocation: lastKnownLocation, + IsMisdirected: isMisdirected, + IsUnloadedAtDestination: isUnloadedAtDestination, + CurrentVoyage: currentVoyage, + } + + d.NextExpectedActivity = calculateNextExpectedActivity(d) + d.ETA = calculateETA(d) + + return d +} + +// Below are internal functions used when creating a new delivery. + +func calculateRoutingStatus(itinerary Itinerary, rs RouteSpecification) RoutingStatus { + if itinerary.Legs == nil { + return NotRouted + } + + if rs.IsSatisfiedBy(itinerary) { + return Routed + } + + return Misrouted +} + +func calculateMisdirectedStatus(event HandlingEvent, itinerary Itinerary) bool { + if event.Activity.Type == NotHandled { + return false + } + + return !itinerary.IsExpected(event) +} + +func calculateUnloadedAtDestination(event HandlingEvent, rs RouteSpecification) bool { + if event.Activity.Type == NotHandled { + return false + } + + return event.Activity.Type == Unload && rs.Destination == event.Activity.Location +} + +func calculateTransportStatus(event HandlingEvent) TransportStatus { + switch event.Activity.Type { + case NotHandled: + return NotReceived + case Load: + return OnboardCarrier + case Unload: + return InPort + case Receive: + return InPort + case Customs: + return InPort + case Claim: + return Claimed + } + return Unknown +} + +func calculateLastKnownLocation(event HandlingEvent) location.UNLocode { + return event.Activity.Location +} + +func calculateNextExpectedActivity(d Delivery) HandlingActivity { + if !d.IsOnTrack() { + return HandlingActivity{} + } + + switch d.LastEvent.Activity.Type { + case NotHandled: + return HandlingActivity{Type: Receive, Location: d.RouteSpecification.Origin} + case Receive: + l := d.Itinerary.Legs[0] + return HandlingActivity{Type: Load, Location: l.LoadLocation, VoyageNumber: l.VoyageNumber} + case Load: + for _, l := range d.Itinerary.Legs { + if l.LoadLocation == d.LastEvent.Activity.Location { + return HandlingActivity{Type: Unload, Location: l.UnloadLocation, VoyageNumber: l.VoyageNumber} + } + } + case Unload: + for i, l := range d.Itinerary.Legs { + if l.UnloadLocation == d.LastEvent.Activity.Location { + if i < len(d.Itinerary.Legs)-1 { + return HandlingActivity{Type: Load, Location: d.Itinerary.Legs[i+1].LoadLocation, VoyageNumber: d.Itinerary.Legs[i+1].VoyageNumber} + } + + return HandlingActivity{Type: Claim, Location: l.UnloadLocation} + } + } + } + + return HandlingActivity{} +} + +func calculateCurrentVoyage(transportStatus TransportStatus, event HandlingEvent) voyage.Number { + if transportStatus == OnboardCarrier && event.Activity.Type != NotHandled { + return event.Activity.VoyageNumber + } + + return voyage.Number("") +} + +func calculateETA(d Delivery) time.Time { + if !d.IsOnTrack() { + return time.Time{} + } + + return d.Itinerary.FinalArrivalTime() +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/cargo/handling.go 
b/vendor/github.com/go-kit/kit/examples/shipping/cargo/handling.go new file mode 100644 index 000000000..bec8509f6 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/cargo/handling.go @@ -0,0 +1,121 @@ +package cargo + +// TODO: It would make sense to have this in its own package. Unfortunately, +// then there would be a circular dependency between the cargo and handling +// packages since cargo.Delivery would use handling.HandlingEvent and +// handling.HandlingEvent would use cargo.TrackingID. Also, +// HandlingEventFactory depends on the cargo repository. +// +// It would make sense not having the cargo package depend on handling. + +import ( + "errors" + "time" + + "github.com/go-kit/kit/examples/shipping/location" + "github.com/go-kit/kit/examples/shipping/voyage" +) + +// HandlingActivity represents how and where a cargo can be handled, and can +// be used to express predictions about what is expected to happen to a cargo +// in the future. +type HandlingActivity struct { + Type HandlingEventType + Location location.UNLocode + VoyageNumber voyage.Number +} + +// HandlingEvent is used to register the event when, for instance, a cargo is +// unloaded from a carrier at a some location at a given time. +type HandlingEvent struct { + TrackingID TrackingID + Activity HandlingActivity +} + +// HandlingEventType describes type of a handling event. +type HandlingEventType int + +// Valid handling event types. +const ( + NotHandled HandlingEventType = iota + Load + Unload + Receive + Claim + Customs +) + +func (t HandlingEventType) String() string { + switch t { + case NotHandled: + return "Not Handled" + case Load: + return "Load" + case Unload: + return "Unload" + case Receive: + return "Receive" + case Claim: + return "Claim" + case Customs: + return "Customs" + } + + return "" +} + +// HandlingHistory is the handling history of a cargo. +type HandlingHistory struct { + HandlingEvents []HandlingEvent +} + +// MostRecentlyCompletedEvent returns most recently completed handling event. +func (h HandlingHistory) MostRecentlyCompletedEvent() (HandlingEvent, error) { + if len(h.HandlingEvents) == 0 { + return HandlingEvent{}, errors.New("delivery history is empty") + } + + return h.HandlingEvents[len(h.HandlingEvents)-1], nil +} + +// HandlingEventRepository provides access a handling event store. +type HandlingEventRepository interface { + Store(e HandlingEvent) + QueryHandlingHistory(TrackingID) HandlingHistory +} + +// HandlingEventFactory creates handling events. +type HandlingEventFactory struct { + CargoRepository Repository + VoyageRepository voyage.Repository + LocationRepository location.Repository +} + +// CreateHandlingEvent creates a validated handling event. +func (f *HandlingEventFactory) CreateHandlingEvent(registered time.Time, completed time.Time, id TrackingID, + voyageNumber voyage.Number, unLocode location.UNLocode, eventType HandlingEventType) (HandlingEvent, error) { + + if _, err := f.CargoRepository.Find(id); err != nil { + return HandlingEvent{}, err + } + + if _, err := f.VoyageRepository.Find(voyageNumber); err != nil { + // TODO: This is pretty ugly, but when creating a Receive event, the voyage number is not known. 
+ if len(voyageNumber) > 0 { + return HandlingEvent{}, err + } + } + + if _, err := f.LocationRepository.Find(unLocode); err != nil { + return HandlingEvent{}, err + } + + return HandlingEvent{ + TrackingID: id, + Activity: HandlingActivity{ + Type: eventType, + Location: unLocode, + VoyageNumber: voyageNumber, + }, + }, nil +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/cargo/itinerary.go b/vendor/github.com/go-kit/kit/examples/shipping/cargo/itinerary.go new file mode 100644 index 000000000..6b5088ea5 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/cargo/itinerary.go @@ -0,0 +1,91 @@ +package cargo + +import ( + "time" + + "github.com/go-kit/kit/examples/shipping/location" + "github.com/go-kit/kit/examples/shipping/voyage" +) + +// Leg describes the transportation between two locations on a voyage. +type Leg struct { + VoyageNumber voyage.Number `json:"voyage_number"` + LoadLocation location.UNLocode `json:"from"` + UnloadLocation location.UNLocode `json:"to"` + LoadTime time.Time `json:"load_time"` + UnloadTime time.Time `json:"unload_time"` +} + +// NewLeg creates a new itinerary leg. +func NewLeg(voyageNumber voyage.Number, loadLocation, unloadLocation location.UNLocode, loadTime, unloadTime time.Time) Leg { + return Leg{ + VoyageNumber: voyageNumber, + LoadLocation: loadLocation, + UnloadLocation: unloadLocation, + LoadTime: loadTime, + UnloadTime: unloadTime, + } +} + +// Itinerary specifies steps required to transport a cargo from its origin to +// destination. +type Itinerary struct { + Legs []Leg `json:"legs"` +} + +// InitialDepartureLocation returns the start of the itinerary. +func (i Itinerary) InitialDepartureLocation() location.UNLocode { + if i.IsEmpty() { + return location.UNLocode("") + } + return i.Legs[0].LoadLocation +} + +// FinalArrivalLocation returns the end of the itinerary. +func (i Itinerary) FinalArrivalLocation() location.UNLocode { + if i.IsEmpty() { + return location.UNLocode("") + } + return i.Legs[len(i.Legs)-1].UnloadLocation +} + +// FinalArrivalTime returns the expected arrival time at final destination. +func (i Itinerary) FinalArrivalTime() time.Time { + return i.Legs[len(i.Legs)-1].UnloadTime +} + +// IsEmpty checks if the itinerary contains at least one leg. +func (i Itinerary) IsEmpty() bool { + return i.Legs == nil || len(i.Legs) == 0 +} + +// IsExpected checks if the given handling event is expected when executing +// this itinerary. 
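A small sketch of the semantics, assuming the sample UN locodes defined in this example's location package: receipt is expected only at the itinerary's initial departure location, and a load is expected only at one of its load locations on the matching voyage.

    package main

    import (
        "fmt"
        "time"

        "github.com/go-kit/kit/examples/shipping/cargo"
        "github.com/go-kit/kit/examples/shipping/location"
    )

    func main() {
        depart := time.Now()
        arrive := depart.AddDate(0, 0, 10)

        // A one-leg itinerary: load at Stockholm (SESTO), unload at Melbourne (AUMEL).
        it := cargo.Itinerary{Legs: []cargo.Leg{
            cargo.NewLeg("0100S", location.SESTO, location.AUMEL, depart, arrive),
        }}

        // Receipt at the initial departure location is expected.
        receive := cargo.HandlingEvent{Activity: cargo.HandlingActivity{
            Type: cargo.Receive, Location: location.SESTO,
        }}
        fmt.Println(it.IsExpected(receive)) // true

        // Loading at Melbourne is not: AUMEL is an unload location on this itinerary.
        load := cargo.HandlingEvent{Activity: cargo.HandlingActivity{
            Type: cargo.Load, Location: location.AUMEL, VoyageNumber: "0100S",
        }}
        fmt.Println(it.IsExpected(load)) // false
    }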
+func (i Itinerary) IsExpected(event HandlingEvent) bool { + if i.IsEmpty() { + return true + } + + switch event.Activity.Type { + case Receive: + return i.InitialDepartureLocation() == event.Activity.Location + case Load: + for _, l := range i.Legs { + if l.LoadLocation == event.Activity.Location && l.VoyageNumber == event.Activity.VoyageNumber { + return true + } + } + return false + case Unload: + for _, l := range i.Legs { + if l.UnloadLocation == event.Activity.Location && l.VoyageNumber == event.Activity.VoyageNumber { + return true + } + } + return false + case Claim: + return i.FinalArrivalLocation() == event.Activity.Location + } + + return true +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/handling/endpoint.go b/vendor/github.com/go-kit/kit/examples/shipping/handling/endpoint.go new file mode 100644 index 000000000..555d08738 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/handling/endpoint.go @@ -0,0 +1,34 @@ +package handling + +import ( + "context" + "time" + + "github.com/go-kit/kit/endpoint" + + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/location" + "github.com/go-kit/kit/examples/shipping/voyage" +) + +type registerIncidentRequest struct { + ID cargo.TrackingID + Location location.UNLocode + Voyage voyage.Number + EventType cargo.HandlingEventType + CompletionTime time.Time +} + +type registerIncidentResponse struct { + Err error `json:"error,omitempty"` +} + +func (r registerIncidentResponse) error() error { return r.Err } + +func makeRegisterIncidentEndpoint(hs Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(registerIncidentRequest) + err := hs.RegisterHandlingEvent(req.CompletionTime, req.ID, req.Voyage, req.Location, req.EventType) + return registerIncidentResponse{Err: err}, nil + } +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/handling/instrumenting.go b/vendor/github.com/go-kit/kit/examples/shipping/handling/instrumenting.go new file mode 100644 index 000000000..fecce04e1 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/handling/instrumenting.go @@ -0,0 +1,37 @@ +package handling + +import ( + "time" + + "github.com/go-kit/kit/metrics" + + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/location" + "github.com/go-kit/kit/examples/shipping/voyage" +) + +type instrumentingService struct { + requestCount metrics.Counter + requestLatency metrics.Histogram + Service +} + +// NewInstrumentingService returns an instance of an instrumenting Service. 
+func NewInstrumentingService(counter metrics.Counter, latency metrics.Histogram, s Service) Service { + return &instrumentingService{ + requestCount: counter, + requestLatency: latency, + Service: s, + } +} + +func (s *instrumentingService) RegisterHandlingEvent(completed time.Time, id cargo.TrackingID, voyageNumber voyage.Number, + loc location.UNLocode, eventType cargo.HandlingEventType) error { + + defer func(begin time.Time) { + s.requestCount.With("method", "register_incident").Add(1) + s.requestLatency.With("method", "register_incident").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return s.Service.RegisterHandlingEvent(completed, id, voyageNumber, loc, eventType) +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/handling/logging.go b/vendor/github.com/go-kit/kit/examples/shipping/handling/logging.go new file mode 100644 index 000000000..84722fb47 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/handling/logging.go @@ -0,0 +1,38 @@ +package handling + +import ( + "time" + + "github.com/go-kit/kit/log" + + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/location" + "github.com/go-kit/kit/examples/shipping/voyage" +) + +type loggingService struct { + logger log.Logger + Service +} + +// NewLoggingService returns a new instance of a logging Service. +func NewLoggingService(logger log.Logger, s Service) Service { + return &loggingService{logger, s} +} + +func (s *loggingService) RegisterHandlingEvent(completed time.Time, id cargo.TrackingID, voyageNumber voyage.Number, + unLocode location.UNLocode, eventType cargo.HandlingEventType) (err error) { + defer func(begin time.Time) { + s.logger.Log( + "method", "register_incident", + "tracking_id", id, + "location", unLocode, + "voyage", voyageNumber, + "event_type", eventType, + "completion_time", completed, + "took", time.Since(begin), + "err", err, + ) + }(time.Now()) + return s.Service.RegisterHandlingEvent(completed, id, voyageNumber, unLocode, eventType) +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/handling/service.go b/vendor/github.com/go-kit/kit/examples/shipping/handling/service.go new file mode 100644 index 000000000..83d503a2c --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/handling/service.go @@ -0,0 +1,76 @@ +// Package handling provides the use-case for registering incidents. Used by +// views facing the people handling the cargo along its route. +package handling + +import ( + "errors" + "time" + + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/inspection" + "github.com/go-kit/kit/examples/shipping/location" + "github.com/go-kit/kit/examples/shipping/voyage" +) + +// ErrInvalidArgument is returned when one or more arguments are invalid. +var ErrInvalidArgument = errors.New("invalid argument") + +// EventHandler provides a means of subscribing to registered handling events. +type EventHandler interface { + CargoWasHandled(cargo.HandlingEvent) +} + +// Service provides handling operations. +type Service interface { + // RegisterHandlingEvent registers a handling event in the system, and + // notifies interested parties that a cargo has been handled. 
+ RegisterHandlingEvent(completed time.Time, id cargo.TrackingID, voyageNumber voyage.Number, + unLocode location.UNLocode, eventType cargo.HandlingEventType) error +} + +type service struct { + handlingEventRepository cargo.HandlingEventRepository + handlingEventFactory cargo.HandlingEventFactory + handlingEventHandler EventHandler +} + +func (s *service) RegisterHandlingEvent(completed time.Time, id cargo.TrackingID, voyageNumber voyage.Number, + loc location.UNLocode, eventType cargo.HandlingEventType) error { + if completed.IsZero() || id == "" || loc == "" || eventType == cargo.NotHandled { + return ErrInvalidArgument + } + + e, err := s.handlingEventFactory.CreateHandlingEvent(time.Now(), completed, id, voyageNumber, loc, eventType) + if err != nil { + return err + } + + s.handlingEventRepository.Store(e) + s.handlingEventHandler.CargoWasHandled(e) + + return nil +} + +// NewService creates a handling event service with necessary dependencies. +func NewService(r cargo.HandlingEventRepository, f cargo.HandlingEventFactory, h EventHandler) Service { + return &service{ + handlingEventRepository: r, + handlingEventFactory: f, + handlingEventHandler: h, + } +} + +type handlingEventHandler struct { + InspectionService inspection.Service +} + +func (h *handlingEventHandler) CargoWasHandled(event cargo.HandlingEvent) { + h.InspectionService.InspectCargo(event.TrackingID) +} + +// NewEventHandler returns a new instance of a EventHandler. +func NewEventHandler(s inspection.Service) EventHandler { + return &handlingEventHandler{ + InspectionService: s, + } +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/handling/transport.go b/vendor/github.com/go-kit/kit/examples/shipping/handling/transport.go new file mode 100644 index 000000000..0e21365ba --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/handling/transport.go @@ -0,0 +1,100 @@ +package handling + +import ( + "context" + "encoding/json" + "net/http" + "time" + + "github.com/gorilla/mux" + + kitlog "github.com/go-kit/kit/log" + kithttp "github.com/go-kit/kit/transport/http" + + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/location" + "github.com/go-kit/kit/examples/shipping/voyage" +) + +// MakeHandler returns a handler for the handling service. 
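A client-side sketch of registering an incident against this handler: the JSON field names and event type strings follow `decodeRegisterIncidentRequest` and `cargo.HandlingEventType.String()`. It assumes the example's default `:8080` listen address; the tracking ID is one of the cargos seeded by `storeTestData` in `main.go`.

    package main

    import (
        "bytes"
        "encoding/json"
        "net/http"
        "time"
    )

    func main() {
        body, _ := json.Marshal(map[string]interface{}{
            "completion_time": time.Now(),
            "tracking_id":     "FTL456",  // seeded by storeTestData in main.go
            "voyage":          "",        // may be empty for Receive events
            "location":        "AUMEL",   // FTL456's origin
            "event_type":      "Receive",
        })

        resp, err := http.Post("http://localhost:8080/handling/v1/incidents", "application/json", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        resp.Body.Close()
    }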
+func MakeHandler(hs Service, logger kitlog.Logger) http.Handler { + r := mux.NewRouter() + + opts := []kithttp.ServerOption{ + kithttp.ServerErrorLogger(logger), + kithttp.ServerErrorEncoder(encodeError), + } + + registerIncidentHandler := kithttp.NewServer( + makeRegisterIncidentEndpoint(hs), + decodeRegisterIncidentRequest, + encodeResponse, + opts..., + ) + + r.Handle("/handling/v1/incidents", registerIncidentHandler).Methods("POST") + + return r +} + +func decodeRegisterIncidentRequest(_ context.Context, r *http.Request) (interface{}, error) { + var body struct { + CompletionTime time.Time `json:"completion_time"` + TrackingID string `json:"tracking_id"` + VoyageNumber string `json:"voyage"` + Location string `json:"location"` + EventType string `json:"event_type"` + } + + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + return nil, err + } + + return registerIncidentRequest{ + CompletionTime: body.CompletionTime, + ID: cargo.TrackingID(body.TrackingID), + Voyage: voyage.Number(body.VoyageNumber), + Location: location.UNLocode(body.Location), + EventType: stringToEventType(body.EventType), + }, nil +} + +func stringToEventType(s string) cargo.HandlingEventType { + types := map[string]cargo.HandlingEventType{ + cargo.Receive.String(): cargo.Receive, + cargo.Load.String(): cargo.Load, + cargo.Unload.String(): cargo.Unload, + cargo.Customs.String(): cargo.Customs, + cargo.Claim.String(): cargo.Claim, + } + return types[s] +} + +func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error { + if e, ok := response.(errorer); ok && e.error() != nil { + encodeError(ctx, e.error(), w) + return nil + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} + +type errorer interface { + error() error +} + +// encode errors from business-logic +func encodeError(_ context.Context, err error, w http.ResponseWriter) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + switch err { + case cargo.ErrUnknown: + w.WriteHeader(http.StatusNotFound) + case ErrInvalidArgument: + w.WriteHeader(http.StatusBadRequest) + default: + w.WriteHeader(http.StatusInternalServerError) + } + json.NewEncoder(w).Encode(map[string]interface{}{ + "error": err.Error(), + }) +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/inmem/inmem.go b/vendor/github.com/go-kit/kit/examples/shipping/inmem/inmem.go new file mode 100644 index 000000000..f941b7ebe --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/inmem/inmem.go @@ -0,0 +1,142 @@ +// Package inmem provides in-memory implementations of all the domain repositories. 
+package inmem + +import ( + "sync" + + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/location" + "github.com/go-kit/kit/examples/shipping/voyage" +) + +type cargoRepository struct { + mtx sync.RWMutex + cargos map[cargo.TrackingID]*cargo.Cargo +} + +func (r *cargoRepository) Store(c *cargo.Cargo) error { + r.mtx.Lock() + defer r.mtx.Unlock() + r.cargos[c.TrackingID] = c + return nil +} + +func (r *cargoRepository) Find(id cargo.TrackingID) (*cargo.Cargo, error) { + r.mtx.RLock() + defer r.mtx.RUnlock() + if val, ok := r.cargos[id]; ok { + return val, nil + } + return nil, cargo.ErrUnknown +} + +func (r *cargoRepository) FindAll() []*cargo.Cargo { + r.mtx.RLock() + defer r.mtx.RUnlock() + c := make([]*cargo.Cargo, 0, len(r.cargos)) + for _, val := range r.cargos { + c = append(c, val) + } + return c +} + +// NewCargoRepository returns a new instance of a in-memory cargo repository. +func NewCargoRepository() cargo.Repository { + return &cargoRepository{ + cargos: make(map[cargo.TrackingID]*cargo.Cargo), + } +} + +type locationRepository struct { + locations map[location.UNLocode]*location.Location +} + +func (r *locationRepository) Find(locode location.UNLocode) (*location.Location, error) { + if l, ok := r.locations[locode]; ok { + return l, nil + } + return nil, location.ErrUnknown +} + +func (r *locationRepository) FindAll() []*location.Location { + l := make([]*location.Location, 0, len(r.locations)) + for _, val := range r.locations { + l = append(l, val) + } + return l +} + +// NewLocationRepository returns a new instance of a in-memory location repository. +func NewLocationRepository() location.Repository { + r := &locationRepository{ + locations: make(map[location.UNLocode]*location.Location), + } + + r.locations[location.SESTO] = location.Stockholm + r.locations[location.AUMEL] = location.Melbourne + r.locations[location.CNHKG] = location.Hongkong + r.locations[location.JNTKO] = location.Tokyo + r.locations[location.NLRTM] = location.Rotterdam + r.locations[location.DEHAM] = location.Hamburg + + return r +} + +type voyageRepository struct { + voyages map[voyage.Number]*voyage.Voyage +} + +func (r *voyageRepository) Find(voyageNumber voyage.Number) (*voyage.Voyage, error) { + if v, ok := r.voyages[voyageNumber]; ok { + return v, nil + } + + return nil, voyage.ErrUnknown +} + +// NewVoyageRepository returns a new instance of a in-memory voyage repository. +func NewVoyageRepository() voyage.Repository { + r := &voyageRepository{ + voyages: make(map[voyage.Number]*voyage.Voyage), + } + + r.voyages[voyage.V100.Number] = voyage.V100 + r.voyages[voyage.V300.Number] = voyage.V300 + r.voyages[voyage.V400.Number] = voyage.V400 + + r.voyages[voyage.V0100S.Number] = voyage.V0100S + r.voyages[voyage.V0200T.Number] = voyage.V0200T + r.voyages[voyage.V0300A.Number] = voyage.V0300A + r.voyages[voyage.V0301S.Number] = voyage.V0301S + r.voyages[voyage.V0400S.Number] = voyage.V0400S + + return r +} + +type handlingEventRepository struct { + mtx sync.RWMutex + events map[cargo.TrackingID][]cargo.HandlingEvent +} + +func (r *handlingEventRepository) Store(e cargo.HandlingEvent) { + r.mtx.Lock() + defer r.mtx.Unlock() + // Make array if it's the first event with this tracking ID. 
+ if _, ok := r.events[e.TrackingID]; !ok { + r.events[e.TrackingID] = make([]cargo.HandlingEvent, 0) + } + r.events[e.TrackingID] = append(r.events[e.TrackingID], e) +} + +func (r *handlingEventRepository) QueryHandlingHistory(id cargo.TrackingID) cargo.HandlingHistory { + r.mtx.RLock() + defer r.mtx.RUnlock() + return cargo.HandlingHistory{HandlingEvents: r.events[id]} +} + +// NewHandlingEventRepository returns a new instance of a in-memory handling event repository. +func NewHandlingEventRepository() cargo.HandlingEventRepository { + return &handlingEventRepository{ + events: make(map[cargo.TrackingID][]cargo.HandlingEvent), + } +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/inspection/inspection.go b/vendor/github.com/go-kit/kit/examples/shipping/inspection/inspection.go new file mode 100644 index 000000000..2ebf73d45 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/inspection/inspection.go @@ -0,0 +1,53 @@ +// Package inspection provides means to inspect cargos. +package inspection + +import ( + "github.com/go-kit/kit/examples/shipping/cargo" +) + +// EventHandler provides means of subscribing to inspection events. +type EventHandler interface { + CargoWasMisdirected(*cargo.Cargo) + CargoHasArrived(*cargo.Cargo) +} + +// Service provides cargo inspection operations. +type Service interface { + // InspectCargo inspects cargo and send relevant notifications to + // interested parties, for example if a cargo has been misdirected, or + // unloaded at the final destination. + InspectCargo(id cargo.TrackingID) +} + +type service struct { + cargos cargo.Repository + events cargo.HandlingEventRepository + handler EventHandler +} + +// TODO: Should be transactional +func (s *service) InspectCargo(id cargo.TrackingID) { + c, err := s.cargos.Find(id) + if err != nil { + return + } + + h := s.events.QueryHandlingHistory(id) + + c.DeriveDeliveryProgress(h) + + if c.Delivery.IsMisdirected { + s.handler.CargoWasMisdirected(c) + } + + if c.Delivery.IsUnloadedAtDestination { + s.handler.CargoHasArrived(c) + } + + s.cargos.Store(c) +} + +// NewService creates a inspection service with necessary dependencies. +func NewService(cargos cargo.Repository, events cargo.HandlingEventRepository, handler EventHandler) Service { + return &service{cargos, events, handler} +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/location/location.go b/vendor/github.com/go-kit/kit/examples/shipping/location/location.go new file mode 100644 index 000000000..ee2e5d45c --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/location/location.go @@ -0,0 +1,29 @@ +// Package location provides the Location aggregate. +package location + +import ( + "errors" +) + +// UNLocode is the United Nations location code that uniquely identifies a +// particular location. +// +// http://www.unece.org/cefact/locode/ +// http://www.unece.org/cefact/locode/DocColumnDescription.htm#LOCODE +type UNLocode string + +// Location is a location is our model is stops on a journey, such as cargo +// origin or destination, or carrier movement endpoints. +type Location struct { + UNLocode UNLocode + Name string +} + +// ErrUnknown is used when a location could not be found. +var ErrUnknown = errors.New("unknown location") + +// Repository provides access a location store. 
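A minimal usage sketch of this repository interface, using the in-memory implementation added earlier in this diff and its pre-registered sample locations:

    package main

    import (
        "fmt"

        "github.com/go-kit/kit/examples/shipping/inmem"
        "github.com/go-kit/kit/examples/shipping/location"
    )

    func main() {
        var repo location.Repository = inmem.NewLocationRepository()

        if l, err := repo.Find(location.SESTO); err == nil {
            fmt.Println(l.Name) // "Stockholm"
        }

        // Unregistered locodes yield location.ErrUnknown.
        if _, err := repo.Find(location.UNLocode("XXXXX")); err == location.ErrUnknown {
            fmt.Println("unknown location")
        }
    }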
+type Repository interface { + Find(locode UNLocode) (*Location, error) + FindAll() []*Location +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/location/sample_locations.go b/vendor/github.com/go-kit/kit/examples/shipping/location/sample_locations.go new file mode 100644 index 000000000..7fd34efa1 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/location/sample_locations.go @@ -0,0 +1,27 @@ +package location + +// Sample UN locodes. +var ( + SESTO UNLocode = "SESTO" + AUMEL UNLocode = "AUMEL" + CNHKG UNLocode = "CNHKG" + USNYC UNLocode = "USNYC" + USCHI UNLocode = "USCHI" + JNTKO UNLocode = "JNTKO" + DEHAM UNLocode = "DEHAM" + NLRTM UNLocode = "NLRTM" + FIHEL UNLocode = "FIHEL" +) + +// Sample locations. +var ( + Stockholm = &Location{SESTO, "Stockholm"} + Melbourne = &Location{AUMEL, "Melbourne"} + Hongkong = &Location{CNHKG, "Hongkong"} + NewYork = &Location{USNYC, "New York"} + Chicago = &Location{USCHI, "Chicago"} + Tokyo = &Location{JNTKO, "Tokyo"} + Hamburg = &Location{DEHAM, "Hamburg"} + Rotterdam = &Location{NLRTM, "Rotterdam"} + Helsinki = &Location{FIHEL, "Helsinki"} +) diff --git a/vendor/github.com/go-kit/kit/examples/shipping/main.go b/vendor/github.com/go-kit/kit/examples/shipping/main.go new file mode 100644 index 000000000..8337d61da --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/main.go @@ -0,0 +1,200 @@ +package main + +import ( + "context" + "flag" + "fmt" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + stdprometheus "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "github.com/go-kit/kit/log" + kitprometheus "github.com/go-kit/kit/metrics/prometheus" + + "github.com/go-kit/kit/examples/shipping/booking" + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/handling" + "github.com/go-kit/kit/examples/shipping/inmem" + "github.com/go-kit/kit/examples/shipping/inspection" + "github.com/go-kit/kit/examples/shipping/location" + "github.com/go-kit/kit/examples/shipping/routing" + "github.com/go-kit/kit/examples/shipping/tracking" +) + +const ( + defaultPort = "8080" + defaultRoutingServiceURL = "http://localhost:7878" +) + +func main() { + var ( + addr = envString("PORT", defaultPort) + rsurl = envString("ROUTINGSERVICE_URL", defaultRoutingServiceURL) + + httpAddr = flag.String("http.addr", ":"+addr, "HTTP listen address") + routingServiceURL = flag.String("service.routing", rsurl, "routing service URL") + + ctx = context.Background() + ) + + flag.Parse() + + var logger log.Logger + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = log.With(logger, "ts", log.DefaultTimestampUTC) + + var ( + cargos = inmem.NewCargoRepository() + locations = inmem.NewLocationRepository() + voyages = inmem.NewVoyageRepository() + handlingEvents = inmem.NewHandlingEventRepository() + ) + + // Configure some questionable dependencies. + var ( + handlingEventFactory = cargo.HandlingEventFactory{ + CargoRepository: cargos, + VoyageRepository: voyages, + LocationRepository: locations, + } + handlingEventHandler = handling.NewEventHandler( + inspection.NewService(cargos, handlingEvents, nil), + ) + ) + + // Facilitate testing by adding some cargos. 
+ storeTestData(cargos) + + fieldKeys := []string{"method"} + + var rs routing.Service + rs = routing.NewProxyingMiddleware(ctx, *routingServiceURL)(rs) + + var bs booking.Service + bs = booking.NewService(cargos, locations, handlingEvents, rs) + bs = booking.NewLoggingService(log.With(logger, "component", "booking"), bs) + bs = booking.NewInstrumentingService( + kitprometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: "api", + Subsystem: "booking_service", + Name: "request_count", + Help: "Number of requests received.", + }, fieldKeys), + kitprometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ + Namespace: "api", + Subsystem: "booking_service", + Name: "request_latency_microseconds", + Help: "Total duration of requests in microseconds.", + }, fieldKeys), + bs, + ) + + var ts tracking.Service + ts = tracking.NewService(cargos, handlingEvents) + ts = tracking.NewLoggingService(log.With(logger, "component", "tracking"), ts) + ts = tracking.NewInstrumentingService( + kitprometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: "api", + Subsystem: "tracking_service", + Name: "request_count", + Help: "Number of requests received.", + }, fieldKeys), + kitprometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ + Namespace: "api", + Subsystem: "tracking_service", + Name: "request_latency_microseconds", + Help: "Total duration of requests in microseconds.", + }, fieldKeys), + ts, + ) + + var hs handling.Service + hs = handling.NewService(handlingEvents, handlingEventFactory, handlingEventHandler) + hs = handling.NewLoggingService(log.With(logger, "component", "handling"), hs) + hs = handling.NewInstrumentingService( + kitprometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: "api", + Subsystem: "handling_service", + Name: "request_count", + Help: "Number of requests received.", + }, fieldKeys), + kitprometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ + Namespace: "api", + Subsystem: "handling_service", + Name: "request_latency_microseconds", + Help: "Total duration of requests in microseconds.", + }, fieldKeys), + hs, + ) + + httpLogger := log.With(logger, "component", "http") + + mux := http.NewServeMux() + + mux.Handle("/booking/v1/", booking.MakeHandler(bs, httpLogger)) + mux.Handle("/tracking/v1/", tracking.MakeHandler(ts, httpLogger)) + mux.Handle("/handling/v1/", handling.MakeHandler(hs, httpLogger)) + + http.Handle("/", accessControl(mux)) + http.Handle("/metrics", promhttp.Handler()) + + errs := make(chan error, 2) + go func() { + logger.Log("transport", "http", "address", *httpAddr, "msg", "listening") + errs <- http.ListenAndServe(*httpAddr, nil) + }() + go func() { + c := make(chan os.Signal) + signal.Notify(c, syscall.SIGINT) + errs <- fmt.Errorf("%s", <-c) + }() + + logger.Log("terminated", <-errs) +} + +func accessControl(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Origin, Content-Type") + + if r.Method == "OPTIONS" { + return + } + + h.ServeHTTP(w, r) + }) +} + +func envString(env, fallback string) string { + e := os.Getenv(env) + if e == "" { + return fallback + } + return e +} + +func storeTestData(r cargo.Repository) { + test1 := cargo.New("FTL456", cargo.RouteSpecification{ + Origin: location.AUMEL, + Destination: location.SESTO, + ArrivalDeadline: time.Now().AddDate(0, 0, 7), + }) + if err := r.Store(test1); err != nil { 
+ panic(err) + } + + test2 := cargo.New("ABC123", cargo.RouteSpecification{ + Origin: location.SESTO, + Destination: location.CNHKG, + ArrivalDeadline: time.Now().AddDate(0, 0, 14), + }) + if err := r.Store(test2); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/routing/proxying.go b/vendor/github.com/go-kit/kit/examples/shipping/routing/proxying.go new file mode 100644 index 000000000..0c9150b1e --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/routing/proxying.go @@ -0,0 +1,117 @@ +package routing + +import ( + "context" + "encoding/json" + "net/http" + "net/url" + "time" + + "github.com/go-kit/kit/circuitbreaker" + "github.com/go-kit/kit/endpoint" + kithttp "github.com/go-kit/kit/transport/http" + + "github.com/go-kit/kit/examples/shipping/cargo" + "github.com/go-kit/kit/examples/shipping/location" + "github.com/go-kit/kit/examples/shipping/voyage" +) + +type proxyService struct { + context.Context + FetchRoutesEndpoint endpoint.Endpoint + Service +} + +func (s proxyService) FetchRoutesForSpecification(rs cargo.RouteSpecification) []cargo.Itinerary { + response, err := s.FetchRoutesEndpoint(s.Context, fetchRoutesRequest{ + From: string(rs.Origin), + To: string(rs.Destination), + }) + if err != nil { + return []cargo.Itinerary{} + } + + resp := response.(fetchRoutesResponse) + + var itineraries []cargo.Itinerary + for _, r := range resp.Paths { + var legs []cargo.Leg + for _, e := range r.Edges { + legs = append(legs, cargo.Leg{ + VoyageNumber: voyage.Number(e.Voyage), + LoadLocation: location.UNLocode(e.Origin), + UnloadLocation: location.UNLocode(e.Destination), + LoadTime: e.Departure, + UnloadTime: e.Arrival, + }) + } + + itineraries = append(itineraries, cargo.Itinerary{Legs: legs}) + } + + return itineraries +} + +// ServiceMiddleware defines a middleware for a routing service. +type ServiceMiddleware func(Service) Service + +// NewProxyingMiddleware returns a new instance of a proxying middleware. 
+func NewProxyingMiddleware(ctx context.Context, proxyURL string) ServiceMiddleware { + return func(next Service) Service { + var e endpoint.Endpoint + e = makeFetchRoutesEndpoint(ctx, proxyURL) + e = circuitbreaker.Hystrix("fetch-routes")(e) + return proxyService{ctx, e, next} + } +} + +type fetchRoutesRequest struct { + From string + To string +} + +type fetchRoutesResponse struct { + Paths []struct { + Edges []struct { + Origin string `json:"origin"` + Destination string `json:"destination"` + Voyage string `json:"voyage"` + Departure time.Time `json:"departure"` + Arrival time.Time `json:"arrival"` + } `json:"edges"` + } `json:"paths"` +} + +func makeFetchRoutesEndpoint(ctx context.Context, instance string) endpoint.Endpoint { + u, err := url.Parse(instance) + if err != nil { + panic(err) + } + if u.Path == "" { + u.Path = "/paths" + } + return kithttp.NewClient( + "GET", u, + encodeFetchRoutesRequest, + decodeFetchRoutesResponse, + ).Endpoint() +} + +func decodeFetchRoutesResponse(_ context.Context, resp *http.Response) (interface{}, error) { + var response fetchRoutesResponse + if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { + return nil, err + } + return response, nil +} + +func encodeFetchRoutesRequest(_ context.Context, r *http.Request, request interface{}) error { + req := request.(fetchRoutesRequest) + + vals := r.URL.Query() + vals.Add("from", req.From) + vals.Add("to", req.To) + r.URL.RawQuery = vals.Encode() + + return nil +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/routing/routing.go b/vendor/github.com/go-kit/kit/examples/shipping/routing/routing.go new file mode 100644 index 000000000..50496f2b8 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/routing/routing.go @@ -0,0 +1,15 @@ +// Package routing provides the routing domain service. It does not actually +// implement the routing service but merely acts as a proxy for a separate +// bounded context. +package routing + +import ( + "github.com/go-kit/kit/examples/shipping/cargo" +) + +// Service provides access to an external routing service. +type Service interface { + // FetchRoutesForSpecification finds all possible routes that satisfy a + // given specification. 
+ FetchRoutesForSpecification(rs cargo.RouteSpecification) []cargo.Itinerary +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/tracking/endpoint.go b/vendor/github.com/go-kit/kit/examples/shipping/tracking/endpoint.go new file mode 100644 index 000000000..ddb13172f --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/tracking/endpoint.go @@ -0,0 +1,26 @@ +package tracking + +import ( + "context" + + "github.com/go-kit/kit/endpoint" +) + +type trackCargoRequest struct { + ID string +} + +type trackCargoResponse struct { + Cargo *Cargo `json:"cargo,omitempty"` + Err error `json:"error,omitempty"` +} + +func (r trackCargoResponse) error() error { return r.Err } + +func makeTrackCargoEndpoint(ts Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(trackCargoRequest) + c, err := ts.Track(req.ID) + return trackCargoResponse{Cargo: &c, Err: err}, nil + } +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/tracking/instrumenting.go b/vendor/github.com/go-kit/kit/examples/shipping/tracking/instrumenting.go new file mode 100644 index 000000000..f5dc018b0 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/tracking/instrumenting.go @@ -0,0 +1,31 @@ +package tracking + +import ( + "time" + + "github.com/go-kit/kit/metrics" +) + +type instrumentingService struct { + requestCount metrics.Counter + requestLatency metrics.Histogram + Service +} + +// NewInstrumentingService returns an instance of an instrumenting Service. +func NewInstrumentingService(counter metrics.Counter, latency metrics.Histogram, s Service) Service { + return &instrumentingService{ + requestCount: counter, + requestLatency: latency, + Service: s, + } +} + +func (s *instrumentingService) Track(id string) (Cargo, error) { + defer func(begin time.Time) { + s.requestCount.With("method", "track").Add(1) + s.requestLatency.With("method", "track").Observe(time.Since(begin).Seconds()) + }(time.Now()) + + return s.Service.Track(id) +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/tracking/logging.go b/vendor/github.com/go-kit/kit/examples/shipping/tracking/logging.go new file mode 100644 index 000000000..584aeaa4b --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/tracking/logging.go @@ -0,0 +1,24 @@ +package tracking + +import ( + "time" + + "github.com/go-kit/kit/log" +) + +type loggingService struct { + logger log.Logger + Service +} + +// NewLoggingService returns a new instance of a logging Service. +func NewLoggingService(logger log.Logger, s Service) Service { + return &loggingService{logger, s} +} + +func (s *loggingService) Track(id string) (c Cargo, err error) { + defer func(begin time.Time) { + s.logger.Log("method", "track", "tracking_id", id, "took", time.Since(begin), "err", err) + }(time.Now()) + return s.Service.Track(id) +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/tracking/service.go b/vendor/github.com/go-kit/kit/examples/shipping/tracking/service.go new file mode 100644 index 000000000..b0e360b23 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/tracking/service.go @@ -0,0 +1,163 @@ +// Package tracking provides the use-case of tracking a cargo. Used by views +// facing the end-user. +package tracking + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/go-kit/kit/examples/shipping/cargo" +) + +// ErrInvalidArgument is returned when one or more arguments are invalid. 
+var ErrInvalidArgument = errors.New("invalid argument") + +// Service is the interface that provides the basic Track method. +type Service interface { + // Track returns a cargo matching a tracking ID. + Track(id string) (Cargo, error) +} + +type service struct { + cargos cargo.Repository + handlingEvents cargo.HandlingEventRepository +} + +func (s *service) Track(id string) (Cargo, error) { + if id == "" { + return Cargo{}, ErrInvalidArgument + } + c, err := s.cargos.Find(cargo.TrackingID(id)) + if err != nil { + return Cargo{}, err + } + return assemble(c, s.handlingEvents), nil +} + +// NewService returns a new instance of the default Service. +func NewService(cargos cargo.Repository, events cargo.HandlingEventRepository) Service { + return &service{ + cargos: cargos, + handlingEvents: events, + } +} + +// Cargo is a read model for tracking views. +type Cargo struct { + TrackingID string `json:"tracking_id"` + StatusText string `json:"status_text"` + Origin string `json:"origin"` + Destination string `json:"destination"` + ETA time.Time `json:"eta"` + NextExpectedActivity string `json:"next_expected_activity"` + ArrivalDeadline time.Time `json:"arrival_deadline"` + Events []Event `json:"events"` +} + +// Leg is a read model for booking views. +type Leg struct { + VoyageNumber string `json:"voyage_number"` + From string `json:"from"` + To string `json:"to"` + LoadTime time.Time `json:"load_time"` + UnloadTime time.Time `json:"unload_time"` +} + +// Event is a read model for tracking views. +type Event struct { + Description string `json:"description"` + Expected bool `json:"expected"` +} + +func assemble(c *cargo.Cargo, events cargo.HandlingEventRepository) Cargo { + return Cargo{ + TrackingID: string(c.TrackingID), + Origin: string(c.Origin), + Destination: string(c.RouteSpecification.Destination), + ETA: c.Delivery.ETA, + NextExpectedActivity: nextExpectedActivity(c), + ArrivalDeadline: c.RouteSpecification.ArrivalDeadline, + StatusText: assembleStatusText(c), + Events: assembleEvents(c, events), + } +} + +func assembleLegs(c cargo.Cargo) []Leg { + var legs []Leg + for _, l := range c.Itinerary.Legs { + legs = append(legs, Leg{ + VoyageNumber: string(l.VoyageNumber), + From: string(l.LoadLocation), + To: string(l.UnloadLocation), + LoadTime: l.LoadTime, + UnloadTime: l.UnloadTime, + }) + } + return legs +} + +func nextExpectedActivity(c *cargo.Cargo) string { + a := c.Delivery.NextExpectedActivity + prefix := "Next expected activity is to" + + switch a.Type { + case cargo.Load: + return fmt.Sprintf("%s %s cargo onto voyage %s in %s.", prefix, strings.ToLower(a.Type.String()), a.VoyageNumber, a.Location) + case cargo.Unload: + return fmt.Sprintf("%s %s cargo off of voyage %s in %s.", prefix, strings.ToLower(a.Type.String()), a.VoyageNumber, a.Location) + case cargo.NotHandled: + return "There are currently no expected activities for this cargo." 
+ } + + return fmt.Sprintf("%s %s cargo in %s.", prefix, strings.ToLower(a.Type.String()), a.Location) +} + +func assembleStatusText(c *cargo.Cargo) string { + switch c.Delivery.TransportStatus { + case cargo.NotReceived: + return "Not received" + case cargo.InPort: + return fmt.Sprintf("In port %s", c.Delivery.LastKnownLocation) + case cargo.OnboardCarrier: + return fmt.Sprintf("Onboard voyage %s", c.Delivery.CurrentVoyage) + case cargo.Claimed: + return "Claimed" + default: + return "Unknown" + } +} + +func assembleEvents(c *cargo.Cargo, handlingEvents cargo.HandlingEventRepository) []Event { + h := handlingEvents.QueryHandlingHistory(c.TrackingID) + + var events []Event + for _, e := range h.HandlingEvents { + var description string + + switch e.Activity.Type { + case cargo.NotHandled: + description = "Cargo has not yet been received." + case cargo.Receive: + description = fmt.Sprintf("Received in %s, at %s", e.Activity.Location, time.Now().Format(time.RFC3339)) + case cargo.Load: + description = fmt.Sprintf("Loaded onto voyage %s in %s, at %s.", e.Activity.VoyageNumber, e.Activity.Location, time.Now().Format(time.RFC3339)) + case cargo.Unload: + description = fmt.Sprintf("Unloaded off voyage %s in %s, at %s.", e.Activity.VoyageNumber, e.Activity.Location, time.Now().Format(time.RFC3339)) + case cargo.Claim: + description = fmt.Sprintf("Claimed in %s, at %s.", e.Activity.Location, time.Now().Format(time.RFC3339)) + case cargo.Customs: + description = fmt.Sprintf("Cleared customs in %s, at %s.", e.Activity.Location, time.Now().Format(time.RFC3339)) + default: + description = "[Unknown status]" + } + + events = append(events, Event{ + Description: description, + Expected: c.Itinerary.IsExpected(e), + }) + } + + return events +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/tracking/transport.go b/vendor/github.com/go-kit/kit/examples/shipping/tracking/transport.go new file mode 100644 index 000000000..32db97167 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/tracking/transport.go @@ -0,0 +1,74 @@ +package tracking + +import ( + "context" + "encoding/json" + "errors" + "net/http" + + "github.com/gorilla/mux" + + kitlog "github.com/go-kit/kit/log" + kithttp "github.com/go-kit/kit/transport/http" + + "github.com/go-kit/kit/examples/shipping/cargo" +) + +// MakeHandler returns a handler for the tracking service. 
+func MakeHandler(ts Service, logger kitlog.Logger) http.Handler { + r := mux.NewRouter() + + opts := []kithttp.ServerOption{ + kithttp.ServerErrorLogger(logger), + kithttp.ServerErrorEncoder(encodeError), + } + + trackCargoHandler := kithttp.NewServer( + makeTrackCargoEndpoint(ts), + decodeTrackCargoRequest, + encodeResponse, + opts..., + ) + + r.Handle("/tracking/v1/cargos/{id}", trackCargoHandler).Methods("GET") + + return r +} + +func decodeTrackCargoRequest(_ context.Context, r *http.Request) (interface{}, error) { + vars := mux.Vars(r) + id, ok := vars["id"] + if !ok { + return nil, errors.New("bad route") + } + return trackCargoRequest{ID: id}, nil +} + +func encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error { + if e, ok := response.(errorer); ok && e.error() != nil { + encodeError(ctx, e.error(), w) + return nil + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + return json.NewEncoder(w).Encode(response) +} + +type errorer interface { + error() error +} + +// encode errors from business-logic +func encodeError(_ context.Context, err error, w http.ResponseWriter) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + switch err { + case cargo.ErrUnknown: + w.WriteHeader(http.StatusNotFound) + case ErrInvalidArgument: + w.WriteHeader(http.StatusBadRequest) + default: + w.WriteHeader(http.StatusInternalServerError) + } + json.NewEncoder(w).Encode(map[string]interface{}{ + "error": err.Error(), + }) +} diff --git a/vendor/github.com/go-kit/kit/examples/shipping/voyage/sample_voyages.go b/vendor/github.com/go-kit/kit/examples/shipping/voyage/sample_voyages.go new file mode 100644 index 000000000..751f58852 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/voyage/sample_voyages.go @@ -0,0 +1,40 @@ +package voyage + +import "github.com/go-kit/kit/examples/shipping/location" + +// A set of sample voyages. +var ( + V100 = New("V100", Schedule{ + []CarrierMovement{ + {DepartureLocation: location.CNHKG, ArrivalLocation: location.JNTKO}, + {DepartureLocation: location.JNTKO, ArrivalLocation: location.USNYC}, + }, + }) + + V300 = New("V300", Schedule{ + []CarrierMovement{ + {DepartureLocation: location.JNTKO, ArrivalLocation: location.NLRTM}, + {DepartureLocation: location.NLRTM, ArrivalLocation: location.DEHAM}, + {DepartureLocation: location.DEHAM, ArrivalLocation: location.AUMEL}, + {DepartureLocation: location.AUMEL, ArrivalLocation: location.JNTKO}, + }, + }) + + V400 = New("V400", Schedule{ + []CarrierMovement{ + {DepartureLocation: location.DEHAM, ArrivalLocation: location.SESTO}, + {DepartureLocation: location.SESTO, ArrivalLocation: location.FIHEL}, + {DepartureLocation: location.FIHEL, ArrivalLocation: location.DEHAM}, + }, + }) +) + +// These voyages are hard-coded into the current pathfinder. Make sure +// they exist. +var ( + V0100S = New("0100S", Schedule{[]CarrierMovement{}}) + V0200T = New("0200T", Schedule{[]CarrierMovement{}}) + V0300A = New("0300A", Schedule{[]CarrierMovement{}}) + V0301S = New("0301S", Schedule{[]CarrierMovement{}}) + V0400S = New("0400S", Schedule{[]CarrierMovement{}}) +) diff --git a/vendor/github.com/go-kit/kit/examples/shipping/voyage/voyage.go b/vendor/github.com/go-kit/kit/examples/shipping/voyage/voyage.go new file mode 100644 index 000000000..37366af42 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/shipping/voyage/voyage.go @@ -0,0 +1,44 @@ +// Package voyage provides the Voyage aggregate. 
+package voyage + +import ( + "errors" + "time" + + "github.com/go-kit/kit/examples/shipping/location" +) + +// Number uniquely identifies a particular Voyage. +type Number string + +// Voyage is a uniquely identifiable series of carrier movements. +type Voyage struct { + Number Number + Schedule Schedule +} + +// New creates a voyage with a voyage number and a provided schedule. +func New(n Number, s Schedule) *Voyage { + return &Voyage{Number: n, Schedule: s} +} + +// Schedule describes a voyage schedule. +type Schedule struct { + CarrierMovements []CarrierMovement +} + +// CarrierMovement is a vessel voyage from one location to another. +type CarrierMovement struct { + DepartureLocation location.UNLocode + ArrivalLocation location.UNLocode + DepartureTime time.Time + ArrivalTime time.Time +} + +// ErrUnknown is used when a voyage could not be found. +var ErrUnknown = errors.New("unknown voyage") + +// Repository provides access a voyage store. +type Repository interface { + Find(Number) (*Voyage, error) +} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc1/main.go b/vendor/github.com/go-kit/kit/examples/stringsvc1/main.go new file mode 100644 index 000000000..7649a385d --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/stringsvc1/main.go @@ -0,0 +1,115 @@ +package main + +import ( + "context" + "encoding/json" + "errors" + "log" + "net/http" + "strings" + + "github.com/go-kit/kit/endpoint" + httptransport "github.com/go-kit/kit/transport/http" +) + +// StringService provides operations on strings. +type StringService interface { + Uppercase(context.Context, string) (string, error) + Count(context.Context, string) int +} + +// stringService is a concrete implementation of StringService +type stringService struct{} + +func (stringService) Uppercase(_ context.Context, s string) (string, error) { + if s == "" { + return "", ErrEmpty + } + return strings.ToUpper(s), nil +} + +func (stringService) Count(_ context.Context, s string) int { + return len(s) +} + +// ErrEmpty is returned when an input string is empty. +var ErrEmpty = errors.New("empty string") + +// For each method, we define request and response structs +type uppercaseRequest struct { + S string `json:"s"` +} + +type uppercaseResponse struct { + V string `json:"v"` + Err string `json:"err,omitempty"` // errors don't define JSON marshaling +} + +type countRequest struct { + S string `json:"s"` +} + +type countResponse struct { + V int `json:"v"` +} + +// Endpoints are a primary abstraction in go-kit. An endpoint represents a single RPC (method in our service interface) +func makeUppercaseEndpoint(svc StringService) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(uppercaseRequest) + v, err := svc.Uppercase(ctx, req.S) + if err != nil { + return uppercaseResponse{v, err.Error()}, nil + } + return uppercaseResponse{v, ""}, nil + } +} + +func makeCountEndpoint(svc StringService) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(countRequest) + v := svc.Count(ctx, req.S) + return countResponse{v}, nil + } +} + +// Transports expose the service to the network. In this first example we utilize JSON over HTTP. 
+func main() { + svc := stringService{} + + uppercaseHandler := httptransport.NewServer( + makeUppercaseEndpoint(svc), + decodeUppercaseRequest, + encodeResponse, + ) + + countHandler := httptransport.NewServer( + makeCountEndpoint(svc), + decodeCountRequest, + encodeResponse, + ) + + http.Handle("/uppercase", uppercaseHandler) + http.Handle("/count", countHandler) + log.Fatal(http.ListenAndServe(":8080", nil)) +} + +func decodeUppercaseRequest(_ context.Context, r *http.Request) (interface{}, error) { + var request uppercaseRequest + if err := json.NewDecoder(r.Body).Decode(&request); err != nil { + return nil, err + } + return request, nil +} + +func decodeCountRequest(_ context.Context, r *http.Request) (interface{}, error) { + var request countRequest + if err := json.NewDecoder(r.Body).Decode(&request); err != nil { + return nil, err + } + return request, nil +} + +func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + return json.NewEncoder(w).Encode(response) +} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc2/instrumenting.go b/vendor/github.com/go-kit/kit/examples/stringsvc2/instrumenting.go new file mode 100644 index 000000000..675617d9c --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/stringsvc2/instrumenting.go @@ -0,0 +1,38 @@ +package main + +import ( + "fmt" + "time" + + "github.com/go-kit/kit/metrics" +) + +type instrumentingMiddleware struct { + requestCount metrics.Counter + requestLatency metrics.Histogram + countResult metrics.Histogram + next StringService +} + +func (mw instrumentingMiddleware) Uppercase(s string) (output string, err error) { + defer func(begin time.Time) { + lvs := []string{"method", "uppercase", "error", fmt.Sprint(err != nil)} + mw.requestCount.With(lvs...).Add(1) + mw.requestLatency.With(lvs...).Observe(time.Since(begin).Seconds()) + }(time.Now()) + + output, err = mw.next.Uppercase(s) + return +} + +func (mw instrumentingMiddleware) Count(s string) (n int) { + defer func(begin time.Time) { + lvs := []string{"method", "count", "error", "false"} + mw.requestCount.With(lvs...).Add(1) + mw.requestLatency.With(lvs...).Observe(time.Since(begin).Seconds()) + mw.countResult.Observe(float64(n)) + }(time.Now()) + + n = mw.next.Count(s) + return +} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc2/logging.go b/vendor/github.com/go-kit/kit/examples/stringsvc2/logging.go new file mode 100644 index 000000000..b958f3b6f --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/stringsvc2/logging.go @@ -0,0 +1,41 @@ +package main + +import ( + "time" + + "github.com/go-kit/kit/log" +) + +type loggingMiddleware struct { + logger log.Logger + next StringService +} + +func (mw loggingMiddleware) Uppercase(s string) (output string, err error) { + defer func(begin time.Time) { + _ = mw.logger.Log( + "method", "uppercase", + "input", s, + "output", output, + "err", err, + "took", time.Since(begin), + ) + }(time.Now()) + + output, err = mw.next.Uppercase(s) + return +} + +func (mw loggingMiddleware) Count(s string) (n int) { + defer func(begin time.Time) { + _ = mw.logger.Log( + "method", "count", + "input", s, + "n", n, + "took", time.Since(begin), + ) + }(time.Now()) + + n = mw.next.Count(s) + return +} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc2/main.go b/vendor/github.com/go-kit/kit/examples/stringsvc2/main.go new file mode 100644 index 000000000..60544f276 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/stringsvc2/main.go @@ -0,0 +1,60 @@ +package main + +import ( 
+ "net/http" + "os" + + stdprometheus "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "github.com/go-kit/kit/log" + kitprometheus "github.com/go-kit/kit/metrics/prometheus" + httptransport "github.com/go-kit/kit/transport/http" +) + +func main() { + logger := log.NewLogfmtLogger(os.Stderr) + + fieldKeys := []string{"method", "error"} + requestCount := kitprometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: "my_group", + Subsystem: "string_service", + Name: "request_count", + Help: "Number of requests received.", + }, fieldKeys) + requestLatency := kitprometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ + Namespace: "my_group", + Subsystem: "string_service", + Name: "request_latency_microseconds", + Help: "Total duration of requests in microseconds.", + }, fieldKeys) + countResult := kitprometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ + Namespace: "my_group", + Subsystem: "string_service", + Name: "count_result", + Help: "The result of each count method.", + }, []string{}) // no fields here + + var svc StringService + svc = stringService{} + svc = loggingMiddleware{logger, svc} + svc = instrumentingMiddleware{requestCount, requestLatency, countResult, svc} + + uppercaseHandler := httptransport.NewServer( + makeUppercaseEndpoint(svc), + decodeUppercaseRequest, + encodeResponse, + ) + + countHandler := httptransport.NewServer( + makeCountEndpoint(svc), + decodeCountRequest, + encodeResponse, + ) + + http.Handle("/uppercase", uppercaseHandler) + http.Handle("/count", countHandler) + http.Handle("/metrics", promhttp.Handler()) + logger.Log("msg", "HTTP", "addr", ":8080") + logger.Log("err", http.ListenAndServe(":8080", nil)) +} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc2/service.go b/vendor/github.com/go-kit/kit/examples/stringsvc2/service.go new file mode 100644 index 000000000..1da2f3ebb --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/stringsvc2/service.go @@ -0,0 +1,28 @@ +package main + +import ( + "errors" + "strings" +) + +// StringService provides operations on strings. +type StringService interface { + Uppercase(string) (string, error) + Count(string) int +} + +type stringService struct{} + +func (stringService) Uppercase(s string) (string, error) { + if s == "" { + return "", ErrEmpty + } + return strings.ToUpper(s), nil +} + +func (stringService) Count(s string) int { + return len(s) +} + +// ErrEmpty is returned when an input string is empty. 
+var ErrEmpty = errors.New("empty string") diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc2/transport.go b/vendor/github.com/go-kit/kit/examples/stringsvc2/transport.go new file mode 100644 index 000000000..297b3ffeb --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/stringsvc2/transport.go @@ -0,0 +1,65 @@ +package main + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/go-kit/kit/endpoint" +) + +func makeUppercaseEndpoint(svc StringService) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(uppercaseRequest) + v, err := svc.Uppercase(req.S) + if err != nil { + return uppercaseResponse{v, err.Error()}, nil + } + return uppercaseResponse{v, ""}, nil + } +} + +func makeCountEndpoint(svc StringService) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(countRequest) + v := svc.Count(req.S) + return countResponse{v}, nil + } +} + +func decodeUppercaseRequest(_ context.Context, r *http.Request) (interface{}, error) { + var request uppercaseRequest + if err := json.NewDecoder(r.Body).Decode(&request); err != nil { + return nil, err + } + return request, nil +} + +func decodeCountRequest(_ context.Context, r *http.Request) (interface{}, error) { + var request countRequest + if err := json.NewDecoder(r.Body).Decode(&request); err != nil { + return nil, err + } + return request, nil +} + +func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + return json.NewEncoder(w).Encode(response) +} + +type uppercaseRequest struct { + S string `json:"s"` +} + +type uppercaseResponse struct { + V string `json:"v"` + Err string `json:"err,omitempty"` +} + +type countRequest struct { + S string `json:"s"` +} + +type countResponse struct { + V int `json:"v"` +} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc3/instrumenting.go b/vendor/github.com/go-kit/kit/examples/stringsvc3/instrumenting.go new file mode 100644 index 000000000..b21c016cf --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/stringsvc3/instrumenting.go @@ -0,0 +1,48 @@ +package main + +import ( + "fmt" + "time" + + "github.com/go-kit/kit/metrics" +) + +func instrumentingMiddleware( + requestCount metrics.Counter, + requestLatency metrics.Histogram, + countResult metrics.Histogram, +) ServiceMiddleware { + return func(next StringService) StringService { + return instrmw{requestCount, requestLatency, countResult, next} + } +} + +type instrmw struct { + requestCount metrics.Counter + requestLatency metrics.Histogram + countResult metrics.Histogram + StringService +} + +func (mw instrmw) Uppercase(s string) (output string, err error) { + defer func(begin time.Time) { + lvs := []string{"method", "uppercase", "error", fmt.Sprint(err != nil)} + mw.requestCount.With(lvs...).Add(1) + mw.requestLatency.With(lvs...).Observe(time.Since(begin).Seconds()) + }(time.Now()) + + output, err = mw.StringService.Uppercase(s) + return +} + +func (mw instrmw) Count(s string) (n int) { + defer func(begin time.Time) { + lvs := []string{"method", "count", "error", "false"} + mw.requestCount.With(lvs...).Add(1) + mw.requestLatency.With(lvs...).Observe(time.Since(begin).Seconds()) + mw.countResult.Observe(float64(n)) + }(time.Now()) + + n = mw.StringService.Count(s) + return +} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc3/logging.go b/vendor/github.com/go-kit/kit/examples/stringsvc3/logging.go new file mode 100644 index 
000000000..72a2709de --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/stringsvc3/logging.go @@ -0,0 +1,47 @@ +package main + +import ( + "time" + + "github.com/go-kit/kit/log" +) + +func loggingMiddleware(logger log.Logger) ServiceMiddleware { + return func(next StringService) StringService { + return logmw{logger, next} + } +} + +type logmw struct { + logger log.Logger + StringService +} + +func (mw logmw) Uppercase(s string) (output string, err error) { + defer func(begin time.Time) { + _ = mw.logger.Log( + "method", "uppercase", + "input", s, + "output", output, + "err", err, + "took", time.Since(begin), + ) + }(time.Now()) + + output, err = mw.StringService.Uppercase(s) + return +} + +func (mw logmw) Count(s string) (n int) { + defer func(begin time.Time) { + _ = mw.logger.Log( + "method", "count", + "input", s, + "n", n, + "took", time.Since(begin), + ) + }(time.Now()) + + n = mw.StringService.Count(s) + return +} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc3/main.go b/vendor/github.com/go-kit/kit/examples/stringsvc3/main.go new file mode 100644 index 000000000..5cdb43b1a --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/stringsvc3/main.go @@ -0,0 +1,70 @@ +package main + +import ( + "context" + "flag" + "net/http" + "os" + + stdprometheus "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "github.com/go-kit/kit/log" + kitprometheus "github.com/go-kit/kit/metrics/prometheus" + httptransport "github.com/go-kit/kit/transport/http" +) + +func main() { + var ( + listen = flag.String("listen", ":8080", "HTTP listen address") + proxy = flag.String("proxy", "", "Optional comma-separated list of URLs to proxy uppercase requests") + ) + flag.Parse() + + var logger log.Logger + logger = log.NewLogfmtLogger(os.Stderr) + logger = log.With(logger, "listen", *listen, "caller", log.DefaultCaller) + + fieldKeys := []string{"method", "error"} + requestCount := kitprometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: "my_group", + Subsystem: "string_service", + Name: "request_count", + Help: "Number of requests received.", + }, fieldKeys) + requestLatency := kitprometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ + Namespace: "my_group", + Subsystem: "string_service", + Name: "request_latency_microseconds", + Help: "Total duration of requests in microseconds.", + }, fieldKeys) + countResult := kitprometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ + Namespace: "my_group", + Subsystem: "string_service", + Name: "count_result", + Help: "The result of each count method.", + }, []string{}) + + var svc StringService + svc = stringService{} + svc = proxyingMiddleware(context.Background(), *proxy, logger)(svc) + svc = loggingMiddleware(logger)(svc) + svc = instrumentingMiddleware(requestCount, requestLatency, countResult)(svc) + + uppercaseHandler := httptransport.NewServer( + makeUppercaseEndpoint(svc), + decodeUppercaseRequest, + encodeResponse, + ) + countHandler := httptransport.NewServer( + makeCountEndpoint(svc), + decodeCountRequest, + encodeResponse, + ) + + http.Handle("/uppercase", uppercaseHandler) + http.Handle("/count", countHandler) + http.Handle("/metrics", promhttp.Handler()) + logger.Log("msg", "HTTP", "addr", *listen) + logger.Log("err", http.ListenAndServe(*listen, nil)) +} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc3/proxying.go b/vendor/github.com/go-kit/kit/examples/stringsvc3/proxying.go new file mode 100644 index 000000000..0f6780776 --- /dev/null +++ 
b/vendor/github.com/go-kit/kit/examples/stringsvc3/proxying.go @@ -0,0 +1,117 @@ +package main + +import ( + "context" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "golang.org/x/time/rate" + + "github.com/sony/gobreaker" + + "github.com/go-kit/kit/circuitbreaker" + "github.com/go-kit/kit/endpoint" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/ratelimit" + "github.com/go-kit/kit/sd" + "github.com/go-kit/kit/sd/lb" + httptransport "github.com/go-kit/kit/transport/http" +) + +func proxyingMiddleware(ctx context.Context, instances string, logger log.Logger) ServiceMiddleware { + // If instances is empty, don't proxy. + if instances == "" { + logger.Log("proxy_to", "none") + return func(next StringService) StringService { return next } + } + + // Set some parameters for our client. + var ( + qps = 100 // beyond which we will return an error + maxAttempts = 3 // per request, before giving up + maxTime = 250 * time.Millisecond // wallclock time, before giving up + ) + + // Otherwise, construct an endpoint for each instance in the list, and add + // it to a fixed set of endpoints. In a real service, rather than doing this + // by hand, you'd probably use package sd's support for your service + // discovery system. + var ( + instanceList = split(instances) + endpointer sd.FixedEndpointer + ) + logger.Log("proxy_to", fmt.Sprint(instanceList)) + for _, instance := range instanceList { + var e endpoint.Endpoint + e = makeUppercaseProxy(ctx, instance) + e = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e) + e = ratelimit.NewErroringLimiter(rate.NewLimiter(rate.Every(time.Second), qps))(e) + endpointer = append(endpointer, e) + } + + // Now, build a single, retrying, load-balancing endpoint out of all of + // those individual endpoints. + balancer := lb.NewRoundRobin(endpointer) + retry := lb.Retry(maxAttempts, maxTime, balancer) + + // And finally, return the ServiceMiddleware, implemented by proxymw. + return func(next StringService) StringService { + return proxymw{ctx, next, retry} + } +} + +// proxymw implements StringService, forwarding Uppercase requests to the +// provided endpoint, and serving all other (i.e. Count) requests via the +// next StringService. +type proxymw struct { + ctx context.Context + next StringService // Serve most requests via this service... 
+ uppercase endpoint.Endpoint // ...except Uppercase, which gets served by this endpoint +} + +func (mw proxymw) Count(s string) int { + return mw.next.Count(s) +} + +func (mw proxymw) Uppercase(s string) (string, error) { + response, err := mw.uppercase(mw.ctx, uppercaseRequest{S: s}) + if err != nil { + return "", err + } + + resp := response.(uppercaseResponse) + if resp.Err != "" { + return resp.V, errors.New(resp.Err) + } + return resp.V, nil +} + +func makeUppercaseProxy(ctx context.Context, instance string) endpoint.Endpoint { + if !strings.HasPrefix(instance, "http") { + instance = "http://" + instance + } + u, err := url.Parse(instance) + if err != nil { + panic(err) + } + if u.Path == "" { + u.Path = "/uppercase" + } + return httptransport.NewClient( + "GET", + u, + encodeRequest, + decodeUppercaseResponse, + ).Endpoint() +} + +func split(s string) []string { + a := strings.Split(s, ",") + for i := range a { + a[i] = strings.TrimSpace(a[i]) + } + return a +} diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc3/service.go b/vendor/github.com/go-kit/kit/examples/stringsvc3/service.go new file mode 100644 index 000000000..7e1773a92 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/stringsvc3/service.go @@ -0,0 +1,31 @@ +package main + +import ( + "errors" + "strings" +) + +// StringService provides operations on strings. +type StringService interface { + Uppercase(string) (string, error) + Count(string) int +} + +type stringService struct{} + +func (stringService) Uppercase(s string) (string, error) { + if s == "" { + return "", ErrEmpty + } + return strings.ToUpper(s), nil +} + +func (stringService) Count(s string) int { + return len(s) +} + +// ErrEmpty is returned when an input string is empty. +var ErrEmpty = errors.New("empty string") + +// ServiceMiddleware is a chainable behavior modifier for StringService. 
+type ServiceMiddleware func(StringService) StringService diff --git a/vendor/github.com/go-kit/kit/examples/stringsvc3/transport.go b/vendor/github.com/go-kit/kit/examples/stringsvc3/transport.go new file mode 100644 index 000000000..c17a055c7 --- /dev/null +++ b/vendor/github.com/go-kit/kit/examples/stringsvc3/transport.go @@ -0,0 +1,84 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/go-kit/kit/endpoint" +) + +func makeUppercaseEndpoint(svc StringService) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(uppercaseRequest) + v, err := svc.Uppercase(req.S) + if err != nil { + return uppercaseResponse{v, err.Error()}, nil + } + return uppercaseResponse{v, ""}, nil + } +} + +func makeCountEndpoint(svc StringService) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(countRequest) + v := svc.Count(req.S) + return countResponse{v}, nil + } +} + +func decodeUppercaseRequest(_ context.Context, r *http.Request) (interface{}, error) { + var request uppercaseRequest + if err := json.NewDecoder(r.Body).Decode(&request); err != nil { + return nil, err + } + return request, nil +} + +func decodeCountRequest(_ context.Context, r *http.Request) (interface{}, error) { + var request countRequest + if err := json.NewDecoder(r.Body).Decode(&request); err != nil { + return nil, err + } + return request, nil +} + +func decodeUppercaseResponse(_ context.Context, r *http.Response) (interface{}, error) { + var response uppercaseResponse + if err := json.NewDecoder(r.Body).Decode(&response); err != nil { + return nil, err + } + return response, nil +} + +func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + return json.NewEncoder(w).Encode(response) +} + +func encodeRequest(_ context.Context, r *http.Request, request interface{}) error { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(request); err != nil { + return err + } + r.Body = ioutil.NopCloser(&buf) + return nil +} + +type uppercaseRequest struct { + S string `json:"s"` +} + +type uppercaseResponse struct { + V string `json:"v"` + Err string `json:"err,omitempty"` +} + +type countRequest struct { + S string `json:"s"` +} + +type countResponse struct { + V int `json:"v"` +} diff --git a/vendor/github.com/go-kit/kit/lint b/vendor/github.com/go-kit/kit/lint new file mode 100755 index 000000000..12e307273 --- /dev/null +++ b/vendor/github.com/go-kit/kit/lint @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +if [ ! $(command -v gometalinter) ] +then + go get github.com/alecthomas/gometalinter + gometalinter --update --install +fi + +time gometalinter \ + --exclude='error return value not checked.*(Close|Log|Print).*\(errcheck\)$' \ + --exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$' \ + --exclude='/thrift/' \ + --exclude='/pb/' \ + --exclude='no args in Log call \(vet\)' \ + --disable=dupl \ + --disable=aligncheck \ + --disable=gotype \ + --cyclo-over=20 \ + --tests \ + --concurrency=2 \ + --deadline=300s \ + ./... diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md new file mode 100644 index 000000000..7222f8009 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/README.md @@ -0,0 +1,147 @@ +# package log + +`package log` provides a minimal interface for structured logging in services. 
+It may be wrapped to encode conventions, enforce type-safety, provide leveled +logging, and so on. It can be used for both typical application log events, +and log-structured data streams. + +## Structured logging + +Structured logging is, basically, conceding to the reality that logs are +_data_, and warrant some level of schematic rigor. Using a stricter, +key/value-oriented message format for our logs, containing contextual and +semantic information, makes it much easier to get insight into the +operational activity of the systems we build. Consequently, `package log` is +of the strong belief that "[the benefits of structured logging outweigh the +minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". + +Migrating from unstructured to structured logging is probably a lot easier +than you'd expect. + +```go +// Unstructured +log.Printf("HTTP server listening on %s", addr) + +// Structured +logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") +``` + +## Usage + +### Typical application logging + +```go +w := log.NewSyncWriter(os.Stderr) +logger := log.NewLogfmtLogger(w) +logger.Log("question", "what is the meaning of life?", "answer", 42) + +// Output: +// question="what is the meaning of life?" answer=42 +``` + +### Contextual Loggers + +```go +func main() { + var logger log.Logger + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = log.With(logger, "instance_id", 123) + + logger.Log("msg", "starting") + NewWorker(log.With(logger, "component", "worker")).Run() + NewSlacker(log.With(logger, "component", "slacker")).Run() +} + +// Output: +// instance_id=123 msg=starting +// instance_id=123 component=worker msg=running +// instance_id=123 component=slacker msg=running +``` + +### Interact with stdlib logger + +Redirect stdlib logger to Go kit logger. + +```go +import ( + "os" + stdlog "log" + kitlog "github.com/go-kit/kit/log" +) + +func main() { + logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) + stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) + stdlog.Print("I sure like pie") +} + +// Output: +// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} +``` + +Or, if, for legacy reasons, you need to pipe all of your logging through the +stdlib log package, you can redirect Go kit logger to the stdlib logger. + +```go +logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) +logger.Log("legacy", true, "msg", "at least it's something") + +// Output: +// 2016/01/01 12:34:56 legacy=true msg="at least it's something" +``` + +### Timestamps and callers + +```go +var logger log.Logger +logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) +logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + +logger.Log("msg", "hello") + +// Output: +// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello +``` + +## Supported output formats + +- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write)) +- JSON + +## Enhancements + +`package log` is centered on the one-method Logger interface. + +```go +type Logger interface { + Log(keyvals ...interface{}) error +} +``` + +This interface, and its supporting code like is the product of much iteration +and evaluation. For more details on the evolution of the Logger interface, +see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), +a talk by [Chris Hines](https://github.com/ChrisHines). 
+Also, please see +[#63](https://github.com/go-kit/kit/issues/63), +[#76](https://github.com/go-kit/kit/pull/76), +[#131](https://github.com/go-kit/kit/issues/131), +[#157](https://github.com/go-kit/kit/pull/157), +[#164](https://github.com/go-kit/kit/issues/164), and +[#252](https://github.com/go-kit/kit/pull/252) +to review historical conversations about package log and the Logger interface. + +Value-add packages and suggestions, +like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level), +are of course welcome. Good proposals should + +- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With), +- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and +- Be friendly to packages that accept only an unadorned log.Logger. + +## Benchmarks & comparisons + +There are a few Go logging benchmarks and comparisons that include Go kit's package log. + +- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log +- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/kit/log/benchmark_test.go b/vendor/github.com/go-kit/kit/log/benchmark_test.go new file mode 100644 index 000000000..126bfa5ae --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/benchmark_test.go @@ -0,0 +1,21 @@ +package log_test + +import ( + "testing" + + "github.com/go-kit/kit/log" +) + +func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { + lc := log.With(logger, "common_key", "common_value") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + f(lc) + } +} + +var ( + baseMessage = func(logger log.Logger) { logger.Log("foo_key", "foo_value") } + withMessage = func(logger log.Logger) { log.With(logger, "a", "b").Log("c", "d") } +) diff --git a/vendor/github.com/go-kit/kit/log/concurrency_test.go b/vendor/github.com/go-kit/kit/log/concurrency_test.go new file mode 100644 index 000000000..95a749e77 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/concurrency_test.go @@ -0,0 +1,40 @@ +package log_test + +import ( + "math" + "testing" + + "github.com/go-kit/kit/log" +) + +// These test are designed to be run with the race detector. + +func testConcurrency(t *testing.T, logger log.Logger, total int) { + n := int(math.Sqrt(float64(total))) + share := total / n + + errC := make(chan error, n) + + for i := 0; i < n; i++ { + go func() { + errC <- spam(logger, share) + }() + } + + for i := 0; i < n; i++ { + err := <-errC + if err != nil { + t.Fatalf("concurrent logging error: %v", err) + } + } +} + +func spam(logger log.Logger, count int) error { + for i := 0; i < count; i++ { + err := logger.Log("key", i) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/go-kit/kit/log/deprecated_levels/levels.go b/vendor/github.com/go-kit/kit/log/deprecated_levels/levels.go new file mode 100644 index 000000000..a03421277 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/deprecated_levels/levels.go @@ -0,0 +1,127 @@ +package levels + +import "github.com/go-kit/kit/log" + +// Levels provides a leveled logging wrapper around a logger. It has five +// levels: debug, info, warning (warn), error, and critical (crit). If you +// want a different set of levels, you can create your own levels type very +// easily, and you can elide the configuration. 
+type Levels struct { + logger log.Logger + levelKey string + + // We have a choice between storing level values in string fields or + // making a separate context for each level. When using string fields the + // Log method must combine the base context, the level data, and the + // logged keyvals; but the With method only requires updating one context. + // If we instead keep a separate context for each level the Log method + // must only append the new keyvals; but the With method would have to + // update all five contexts. + + // Roughly speaking, storing multiple contexts breaks even if the ratio of + // Log/With calls is more than the number of levels. We have chosen to + // make the With method cheap and the Log method a bit more costly because + // we do not expect most applications to Log more than five times for each + // call to With. + + debugValue string + infoValue string + warnValue string + errorValue string + critValue string +} + +// New creates a new leveled logger, wrapping the passed logger. +func New(logger log.Logger, options ...Option) Levels { + l := Levels{ + logger: logger, + levelKey: "level", + + debugValue: "debug", + infoValue: "info", + warnValue: "warn", + errorValue: "error", + critValue: "crit", + } + for _, option := range options { + option(&l) + } + return l +} + +// With returns a new leveled logger that includes keyvals in all log events. +func (l Levels) With(keyvals ...interface{}) Levels { + return Levels{ + logger: log.With(l.logger, keyvals...), + levelKey: l.levelKey, + debugValue: l.debugValue, + infoValue: l.infoValue, + warnValue: l.warnValue, + errorValue: l.errorValue, + critValue: l.critValue, + } +} + +// Debug returns a debug level logger. +func (l Levels) Debug() log.Logger { + return log.WithPrefix(l.logger, l.levelKey, l.debugValue) +} + +// Info returns an info level logger. +func (l Levels) Info() log.Logger { + return log.WithPrefix(l.logger, l.levelKey, l.infoValue) +} + +// Warn returns a warning level logger. +func (l Levels) Warn() log.Logger { + return log.WithPrefix(l.logger, l.levelKey, l.warnValue) +} + +// Error returns an error level logger. +func (l Levels) Error() log.Logger { + return log.WithPrefix(l.logger, l.levelKey, l.errorValue) +} + +// Crit returns a critical level logger. +func (l Levels) Crit() log.Logger { + return log.WithPrefix(l.logger, l.levelKey, l.critValue) +} + +// Option sets a parameter for leveled loggers. +type Option func(*Levels) + +// Key sets the key for the field used to indicate log level. By default, +// the key is "level". +func Key(key string) Option { + return func(l *Levels) { l.levelKey = key } +} + +// DebugValue sets the value for the field used to indicate the debug log +// level. By default, the value is "debug". +func DebugValue(value string) Option { + return func(l *Levels) { l.debugValue = value } +} + +// InfoValue sets the value for the field used to indicate the info log level. +// By default, the value is "info". +func InfoValue(value string) Option { + return func(l *Levels) { l.infoValue = value } +} + +// WarnValue sets the value for the field used to indicate the warning log +// level. By default, the value is "warn". +func WarnValue(value string) Option { + return func(l *Levels) { l.warnValue = value } +} + +// ErrorValue sets the value for the field used to indicate the error log +// level. By default, the value is "error". 
+func ErrorValue(value string) Option { + return func(l *Levels) { l.errorValue = value } +} + +// CritValue sets the value for the field used to indicate the critical log +// level. By default, the value is "crit". +func CritValue(value string) Option { + return func(l *Levels) { l.critValue = value } +} diff --git a/vendor/github.com/go-kit/kit/log/deprecated_levels/levels_test.go b/vendor/github.com/go-kit/kit/log/deprecated_levels/levels_test.go new file mode 100644 index 000000000..8d4a7f553 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/deprecated_levels/levels_test.go @@ -0,0 +1,65 @@ +package levels_test + +import ( + "bytes" + "os" + "testing" + + "github.com/go-kit/kit/log" + levels "github.com/go-kit/kit/log/deprecated_levels" +) + +func TestDefaultLevels(t *testing.T) { + buf := bytes.Buffer{} + logger := levels.New(log.NewLogfmtLogger(&buf)) + + logger.Debug().Log("msg", "résumé") // of course you'd want to do this + if want, have := "level=debug msg=résumé\n", buf.String(); want != have { + t.Errorf("want %#v, have %#v", want, have) + } + + buf.Reset() + logger.Info().Log("msg", "Åhus") + if want, have := "level=info msg=Åhus\n", buf.String(); want != have { + t.Errorf("want %#v, have %#v", want, have) + } + + buf.Reset() + logger.Error().Log("msg", "© violation") + if want, have := "level=error msg=\"© violation\"\n", buf.String(); want != have { + t.Errorf("want %#v, have %#v", want, have) + } + + buf.Reset() + logger.Crit().Log("msg", " ") + if want, have := "level=crit msg=\"\\t\"\n", buf.String(); want != have { + t.Errorf("want %#v, have %#v", want, have) + } +} + +func TestModifiedLevels(t *testing.T) { + buf := bytes.Buffer{} + logger := levels.New( + log.NewJSONLogger(&buf), + levels.Key("l"), + levels.DebugValue("dbg"), + levels.InfoValue("nfo"), + levels.WarnValue("wrn"), + levels.ErrorValue("err"), + levels.CritValue("crt"), + ) + logger.With("easter_island", "176°").Debug().Log("msg", "moai") + if want, have := `{"easter_island":"176°","l":"dbg","msg":"moai"}`+"\n", buf.String(); want != have { + t.Errorf("want %#v, have %#v", want, have) + } +} + +func ExampleLevels() { + logger := levels.New(log.NewLogfmtLogger(os.Stdout)) + logger.Debug().Log("msg", "hello") + logger.With("context", "foo").Warn().Log("err", "error") + + // Output: + // level=debug msg=hello + // level=warn context=foo err=error +} diff --git a/vendor/github.com/go-kit/kit/log/doc.go b/vendor/github.com/go-kit/kit/log/doc.go new file mode 100644 index 000000000..918c0af46 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/doc.go @@ -0,0 +1,116 @@ +// Package log provides a structured logger. +// +// Structured logging produces logs easily consumed later by humans or +// machines. Humans might be interested in debugging errors, or tracing +// specific requests. Machines might be interested in counting interesting +// events, or aggregating information for off-line processing. In both cases, +// it is important that the log messages are structured and actionable. +// Package log is designed to encourage both of these best practices. +// +// Basic Usage +// +// The fundamental interface is Logger. Loggers create log events from +// key/value data. The Logger interface has a single method, Log, which +// accepts a sequence of alternating key/value pairs, which this package names +// keyvals. +// +// type Logger interface { +// Log(keyvals ...interface{}) error +// } +// +// Here is an example of a function using a Logger to create log events. 
+// +// func RunTask(task Task, logger log.Logger) string { +// logger.Log("taskID", task.ID, "event", "starting task") +// ... +// logger.Log("taskID", task.ID, "event", "task complete") +// } +// +// The keys in the above example are "taskID" and "event". The values are +// task.ID, "starting task", and "task complete". Every key is followed +// immediately by its value. +// +// Keys are usually plain strings. Values may be any type that has a sensible +// encoding in the chosen log format. With structured logging it is a good +// idea to log simple values without formatting them. This practice allows +// the chosen logger to encode values in the most appropriate way. +// +// Contextual Loggers +// +// A contextual logger stores keyvals that it includes in all log events. +// Building appropriate contextual loggers reduces repetition and aids +// consistency in the resulting log output. With and WithPrefix add context to +// a logger. We can use With to improve the RunTask example. +// +// func RunTask(task Task, logger log.Logger) string { +// logger = log.With(logger, "taskID", task.ID) +// logger.Log("event", "starting task") +// ... +// taskHelper(task.Cmd, logger) +// ... +// logger.Log("event", "task complete") +// } +// +// The improved version emits the same log events as the original for the +// first and last calls to Log. Passing the contextual logger to taskHelper +// enables each log event created by taskHelper to include the task.ID even +// though taskHelper does not have access to that value. Using contextual +// loggers this way simplifies producing log output that enables tracing the +// life cycle of individual tasks. (See the Contextual example for the full +// code of the above snippet.) +// +// Dynamic Contextual Values +// +// A Valuer function stored in a contextual logger generates a new value each +// time an event is logged. The Valuer example demonstrates how this feature +// works. +// +// Valuers provide the basis for consistently logging timestamps and source +// code location. The log package defines several valuers for that purpose. +// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and +// DefaultCaller. A common logger initialization sequence that ensures all log +// entries contain a timestamp and source location looks like this: +// +// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) +// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) +// +// Concurrent Safety +// +// Applications with multiple goroutines want each log event written to the +// same logger to remain separate from other log events. Package log provides +// two simple solutions for concurrent safe logging. +// +// NewSyncWriter wraps an io.Writer and serializes each call to its Write +// method. Using a SyncWriter has the benefit that the smallest practical +// portion of the logging logic is performed within a mutex, but it requires +// the formatting Logger to make only one call to Write per log event. +// +// NewSyncLogger wraps any Logger and serializes each call to its Log method. +// Using a SyncLogger has the benefit that it guarantees each log event is +// handled atomically within the wrapped logger, but it typically serializes +// both the formatting and output logic. Use a SyncLogger if the formatting +// logger may perform multiple writes per log event. 
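+//
+// A minimal sketch of the two approaches (os.Stdout is only an illustrative
+// shared destination; both helpers are described above):
+//
+//	// The logfmt logger makes one Write per event, so a SyncWriter suffices.
+//	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
+//
+//	// Or serialize whole Log calls, which is required when the formatting
+//	// logger may write more than once per event.
+//	logger = log.NewSyncLogger(log.NewJSONLogger(os.Stdout))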
+// +// Error Handling +// +// This package relies on the practice of wrapping or decorating loggers with +// other loggers to provide composable pieces of functionality. It also means +// that Logger.Log must return an error because some +// implementations—especially those that output log data to an io.Writer—may +// encounter errors that cannot be handled locally. This in turn means that +// Loggers that wrap other loggers should return errors from the wrapped +// logger up the stack. +// +// Fortunately, the decorator pattern also provides a way to avoid the +// necessity to check for errors every time an application calls Logger.Log. +// An application required to panic whenever its Logger encounters +// an error could initialize its logger as follows. +// +// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) +// logger := log.LoggerFunc(func(keyvals ...interface{}) error { +// if err := fmtlogger.Log(keyvals...); err != nil { +// panic(err) +// } +// return nil +// }) +package log diff --git a/vendor/github.com/go-kit/kit/log/example_test.go b/vendor/github.com/go-kit/kit/log/example_test.go new file mode 100644 index 000000000..976677489 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/example_test.go @@ -0,0 +1,137 @@ +package log_test + +import ( + "math/rand" + "os" + "sync" + "time" + + "github.com/go-kit/kit/log" +) + +func Example_basic() { + logger := log.NewLogfmtLogger(os.Stdout) + + type Task struct { + ID int + } + + RunTask := func(task Task, logger log.Logger) { + logger.Log("taskID", task.ID, "event", "starting task") + + logger.Log("taskID", task.ID, "event", "task complete") + } + + RunTask(Task{ID: 1}, logger) + + // Output: + // taskID=1 event="starting task" + // taskID=1 event="task complete" +} + +func Example_contextual() { + logger := log.NewLogfmtLogger(os.Stdout) + + type Task struct { + ID int + Cmd string + } + + taskHelper := func(cmd string, logger log.Logger) { + // execute(cmd) + logger.Log("cmd", cmd, "dur", 42*time.Millisecond) + } + + RunTask := func(task Task, logger log.Logger) { + logger = log.With(logger, "taskID", task.ID) + logger.Log("event", "starting task") + + taskHelper(task.Cmd, logger) + + logger.Log("event", "task complete") + } + + RunTask(Task{ID: 1, Cmd: "echo Hello, world!"}, logger) + + // Output: + // taskID=1 event="starting task" + // taskID=1 cmd="echo Hello, world!" dur=42ms + // taskID=1 event="task complete" +} + +func Example_valuer() { + logger := log.NewLogfmtLogger(os.Stdout) + + count := 0 + counter := func() interface{} { + count++ + return count + } + + logger = log.With(logger, "count", log.Valuer(counter)) + + logger.Log("call", "first") + logger.Log("call", "second") + + // Output: + // count=1 call=first + // count=2 call=second +} + +func Example_debugInfo() { + logger := log.NewLogfmtLogger(os.Stdout) + + // make time predictable for this test + baseTime := time.Date(2015, time.February, 3, 10, 0, 0, 0, time.UTC) + mockTime := func() time.Time { + baseTime = baseTime.Add(time.Second) + return baseTime + } + + logger = log.With(logger, "time", log.Timestamp(mockTime), "caller", log.DefaultCaller) + + logger.Log("call", "first") + logger.Log("call", "second") + + // ... 
+ + logger.Log("call", "third") + + // Output: + // time=2015-02-03T10:00:01Z caller=example_test.go:93 call=first + // time=2015-02-03T10:00:02Z caller=example_test.go:94 call=second + // time=2015-02-03T10:00:03Z caller=example_test.go:98 call=third +} + +func Example_syncWriter() { + w := log.NewSyncWriter(os.Stdout) + logger := log.NewLogfmtLogger(w) + + type Task struct { + ID int + } + + var wg sync.WaitGroup + + RunTask := func(task Task, logger log.Logger) { + logger.Log("taskID", task.ID, "event", "starting task") + + time.Sleep(time.Duration(rand.Intn(200)) * time.Millisecond) + + logger.Log("taskID", task.ID, "event", "task complete") + wg.Done() + } + + wg.Add(2) + + go RunTask(Task{ID: 1}, logger) + go RunTask(Task{ID: 2}, logger) + + wg.Wait() + + // Unordered output: + // taskID=1 event="starting task" + // taskID=2 event="starting task" + // taskID=1 event="task complete" + // taskID=2 event="task complete" +} diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go new file mode 100644 index 000000000..66094b4dd --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/json_logger.go @@ -0,0 +1,89 @@ +package log + +import ( + "encoding" + "encoding/json" + "fmt" + "io" + "reflect" +) + +type jsonLogger struct { + io.Writer +} + +// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a +// single JSON object. Each log event produces no more than one call to +// w.Write. The passed Writer must be safe for concurrent use by multiple +// goroutines if the returned Logger will be used concurrently. +func NewJSONLogger(w io.Writer) Logger { + return &jsonLogger{w} +} + +func (l *jsonLogger) Log(keyvals ...interface{}) error { + n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd + m := make(map[string]interface{}, n) + for i := 0; i < len(keyvals); i += 2 { + k := keyvals[i] + var v interface{} = ErrMissingValue + if i+1 < len(keyvals) { + v = keyvals[i+1] + } + merge(m, k, v) + } + return json.NewEncoder(l.Writer).Encode(m) +} + +func merge(dst map[string]interface{}, k, v interface{}) { + var key string + switch x := k.(type) { + case string: + key = x + case fmt.Stringer: + key = safeString(x) + default: + key = fmt.Sprint(x) + } + + // We want json.Marshaler and encoding.TextMarshaller to take priority over + // err.Error() and v.String(). But json.Marshall (called later) does that by + // default so we force a no-op if it's one of those 2 case. 
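+	// (For example, a value that implements both json.Marshaler and error is
+	// left untouched by this switch and is encoded via its MarshalJSON method
+	// when Log calls json.NewEncoder(...).Encode above.)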
+ switch x := v.(type) { + case json.Marshaler: + case encoding.TextMarshaler: + case error: + v = safeError(x) + case fmt.Stringer: + v = safeString(x) + } + + dst[key] = v +} + +func safeString(str fmt.Stringer) (s string) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { + s = "NULL" + } else { + panic(panicVal) + } + } + }() + s = str.String() + return +} + +func safeError(err error) (s interface{}) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + s = nil + } else { + panic(panicVal) + } + } + }() + s = err.Error() + return +} diff --git a/vendor/github.com/go-kit/kit/log/json_logger_test.go b/vendor/github.com/go-kit/kit/log/json_logger_test.go new file mode 100644 index 000000000..e3e309090 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/json_logger_test.go @@ -0,0 +1,162 @@ +package log_test + +import ( + "bytes" + "errors" + "io/ioutil" + "testing" + + "github.com/go-kit/kit/log" +) + +func TestJSONLoggerCaller(t *testing.T) { + t.Parallel() + buf := &bytes.Buffer{} + logger := log.NewJSONLogger(buf) + logger = log.With(logger, "caller", log.DefaultCaller) + + if err := logger.Log(); err != nil { + t.Fatal(err) + } + if want, have := `{"caller":"json_logger_test.go:18"}`+"\n", buf.String(); want != have { + t.Errorf("\nwant %#v\nhave %#v", want, have) + } +} + +func TestJSONLogger(t *testing.T) { + t.Parallel() + buf := &bytes.Buffer{} + logger := log.NewJSONLogger(buf) + if err := logger.Log("err", errors.New("err"), "m", map[string]int{"0": 0}, "a", []int{1, 2, 3}); err != nil { + t.Fatal(err) + } + if want, have := `{"a":[1,2,3],"err":"err","m":{"0":0}}`+"\n", buf.String(); want != have { + t.Errorf("\nwant %#v\nhave %#v", want, have) + } +} + +func TestJSONLoggerMissingValue(t *testing.T) { + t.Parallel() + buf := &bytes.Buffer{} + logger := log.NewJSONLogger(buf) + if err := logger.Log("k"); err != nil { + t.Fatal(err) + } + if want, have := `{"k":"(MISSING)"}`+"\n", buf.String(); want != have { + t.Errorf("\nwant %#v\nhave %#v", want, have) + } +} + +func TestJSONLoggerNilStringerKey(t *testing.T) { + t.Parallel() + + buf := &bytes.Buffer{} + logger := log.NewJSONLogger(buf) + if err := logger.Log((*stringer)(nil), "v"); err != nil { + t.Fatal(err) + } + if want, have := `{"NULL":"v"}`+"\n", buf.String(); want != have { + t.Errorf("\nwant %#v\nhave %#v", want, have) + } +} + +func TestJSONLoggerNilErrorValue(t *testing.T) { + t.Parallel() + + buf := &bytes.Buffer{} + logger := log.NewJSONLogger(buf) + if err := logger.Log("err", (*stringError)(nil)); err != nil { + t.Fatal(err) + } + if want, have := `{"err":null}`+"\n", buf.String(); want != have { + t.Errorf("\nwant %#v\nhave %#v", want, have) + } +} + +// aller implements json.Marshaler, encoding.TextMarshaler, and fmt.Stringer. +type aller struct{} + +func (aller) MarshalJSON() ([]byte, error) { + return []byte("\"json\""), nil +} + +func (aller) MarshalText() ([]byte, error) { + return []byte("text"), nil +} + +func (aller) String() string { + return "string" +} + +func (aller) Error() string { + return "error" +} + +// textstringer implements encoding.TextMarshaler and fmt.Stringer. 
+type textstringer struct{} + +func (textstringer) MarshalText() ([]byte, error) { + return []byte("text"), nil +} + +func (textstringer) String() string { + return "string" +} + +func TestJSONLoggerStringValue(t *testing.T) { + t.Parallel() + tests := []struct { + v interface{} + expected string + }{ + { + v: aller{}, + expected: `{"v":"json"}`, + }, + { + v: textstringer{}, + expected: `{"v":"text"}`, + }, + { + v: stringer("string"), + expected: `{"v":"string"}`, + }, + } + + for _, test := range tests { + buf := &bytes.Buffer{} + logger := log.NewJSONLogger(buf) + if err := logger.Log("v", test.v); err != nil { + t.Fatal(err) + } + + if want, have := test.expected+"\n", buf.String(); want != have { + t.Errorf("\nwant %#v\nhave %#v", want, have) + } + } +} + +type stringer string + +func (s stringer) String() string { + return string(s) +} + +type stringError string + +func (s stringError) Error() string { + return string(s) +} + +func BenchmarkJSONLoggerSimple(b *testing.B) { + benchmarkRunner(b, log.NewJSONLogger(ioutil.Discard), baseMessage) +} + +func BenchmarkJSONLoggerContextual(b *testing.B) { + benchmarkRunner(b, log.NewJSONLogger(ioutil.Discard), withMessage) +} + +func TestJSONLoggerConcurrency(t *testing.T) { + t.Parallel() + testConcurrency(t, log.NewJSONLogger(ioutil.Discard), 10000) +} diff --git a/vendor/github.com/go-kit/kit/log/level/benchmark_test.go b/vendor/github.com/go-kit/kit/log/level/benchmark_test.go new file mode 100644 index 000000000..4fca6f08f --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/level/benchmark_test.go @@ -0,0 +1,72 @@ +package level_test + +import ( + "io/ioutil" + "testing" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" +) + +func Benchmark(b *testing.B) { + contexts := []struct { + name string + context func(log.Logger) log.Logger + }{ + {"NoContext", func(l log.Logger) log.Logger { + return l + }}, + {"TimeContext", func(l log.Logger) log.Logger { + return log.With(l, "time", log.DefaultTimestampUTC) + }}, + {"CallerContext", func(l log.Logger) log.Logger { + return log.With(l, "caller", log.DefaultCaller) + }}, + {"TimeCallerReqIDContext", func(l log.Logger) log.Logger { + return log.With(l, "time", log.DefaultTimestampUTC, "caller", log.DefaultCaller, "reqID", 29) + }}, + } + + loggers := []struct { + name string + logger log.Logger + }{ + {"Nop", log.NewNopLogger()}, + {"Logfmt", log.NewLogfmtLogger(ioutil.Discard)}, + {"JSON", log.NewJSONLogger(ioutil.Discard)}, + } + + filters := []struct { + name string + filter func(log.Logger) log.Logger + }{ + {"Baseline", func(l log.Logger) log.Logger { + return l + }}, + {"DisallowedLevel", func(l log.Logger) log.Logger { + return level.NewFilter(l, level.AllowInfo()) + }}, + {"AllowedLevel", func(l log.Logger) log.Logger { + return level.NewFilter(l, level.AllowAll()) + }}, + } + + for _, c := range contexts { + b.Run(c.name, func(b *testing.B) { + for _, f := range filters { + b.Run(f.name, func(b *testing.B) { + for _, l := range loggers { + b.Run(l.name, func(b *testing.B) { + logger := c.context(f.filter(l.logger)) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + level.Debug(logger).Log("foo", "bar") + } + }) + } + }) + } + }) + } +} diff --git a/vendor/github.com/go-kit/kit/log/level/doc.go b/vendor/github.com/go-kit/kit/log/level/doc.go new file mode 100644 index 000000000..feadc4cdc --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/level/doc.go @@ -0,0 +1,22 @@ +// Package level implements leveled logging on top of package log. 
To use the +// level package, create a logger as per normal in your func main, and wrap it +// with level.NewFilter. +// +// var logger log.Logger +// logger = log.NewLogfmtLogger(os.Stderr) +// logger = level.NewFilter(logger, level.AllowInfo()) // <-- +// logger = log.With(logger, "ts", log.DefaultTimestampUTC) +// +// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error +// helper methods to emit leveled log events. +// +// logger.Log("foo", "bar") // as normal, no level +// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) +// if value > 100 { +// level.Error(logger).Log("value", value) +// } +// +// NewFilter allows precise control over what happens when a log event is +// emitted without a level key, or if a squelched level is used. Check the +// Option functions for details. +package level diff --git a/vendor/github.com/go-kit/kit/log/level/example_test.go b/vendor/github.com/go-kit/kit/log/level/example_test.go new file mode 100644 index 000000000..fed52e512 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/level/example_test.go @@ -0,0 +1,25 @@ +package level_test + +import ( + "errors" + "os" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" +) + +func Example_basic() { + // setup logger with level filter + logger := log.NewLogfmtLogger(os.Stdout) + logger = level.NewFilter(logger, level.AllowInfo()) + logger = log.With(logger, "caller", log.DefaultCaller) + + // use level helpers to log at different levels + level.Error(logger).Log("err", errors.New("bad data")) + level.Info(logger).Log("event", "data saved") + level.Debug(logger).Log("next item", 17) // filtered + + // Output: + // level=error caller=example_test.go:18 err="bad data" + // level=info caller=example_test.go:19 event="data saved" +} diff --git a/vendor/github.com/go-kit/kit/log/level/level.go b/vendor/github.com/go-kit/kit/log/level/level.go new file mode 100644 index 000000000..dd4ef60e0 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/level/level.go @@ -0,0 +1,205 @@ +package level + +import "github.com/go-kit/kit/log" + +// Error returns a logger that includes a Key/ErrorValue pair. +func Error(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), ErrorValue()) +} + +// Warn returns a logger that includes a Key/WarnValue pair. +func Warn(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), WarnValue()) +} + +// Info returns a logger that includes a Key/InfoValue pair. +func Info(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), InfoValue()) +} + +// Debug returns a logger that includes a Key/DebugValue pair. +func Debug(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), DebugValue()) +} + +// NewFilter wraps next and implements level filtering. See the commentary on +// the Option functions for a detailed description of how to configure levels. +// If no options are provided, all leveled log events created with Debug, +// Info, Warn or Error helper methods are squelched and non-leveled log +// events are passed to next unmodified. 
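+//
+// An illustrative configuration (the error value is a placeholder, not part
+// of this package):
+//
+//	logger := level.NewFilter(next,
+//		level.AllowWarn(),          // keep only warn and error events
+//		level.SquelchNoLevel(true), // drop events that carry no level
+//		level.ErrNotAllowed(errors.New("level disallowed by filter")),
+//	)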
+func NewFilter(next log.Logger, options ...Option) log.Logger { + l := &logger{ + next: next, + } + for _, option := range options { + option(l) + } + return l +} + +type logger struct { + next log.Logger + allowed level + squelchNoLevel bool + errNotAllowed error + errNoLevel error +} + +func (l *logger) Log(keyvals ...interface{}) error { + var hasLevel, levelAllowed bool + for i := 1; i < len(keyvals); i += 2 { + if v, ok := keyvals[i].(*levelValue); ok { + hasLevel = true + levelAllowed = l.allowed&v.level != 0 + break + } + } + if !hasLevel && l.squelchNoLevel { + return l.errNoLevel + } + if hasLevel && !levelAllowed { + return l.errNotAllowed + } + return l.next.Log(keyvals...) +} + +// Option sets a parameter for the leveled logger. +type Option func(*logger) + +// AllowAll is an alias for AllowDebug. +func AllowAll() Option { + return AllowDebug() +} + +// AllowDebug allows error, warn, info and debug level log events to pass. +func AllowDebug() Option { + return allowed(levelError | levelWarn | levelInfo | levelDebug) +} + +// AllowInfo allows error, warn and info level log events to pass. +func AllowInfo() Option { + return allowed(levelError | levelWarn | levelInfo) +} + +// AllowWarn allows error and warn level log events to pass. +func AllowWarn() Option { + return allowed(levelError | levelWarn) +} + +// AllowError allows only error level log events to pass. +func AllowError() Option { + return allowed(levelError) +} + +// AllowNone allows no leveled log events to pass. +func AllowNone() Option { + return allowed(0) +} + +func allowed(allowed level) Option { + return func(l *logger) { l.allowed = allowed } +} + +// ErrNotAllowed sets the error to return from Log when it squelches a log +// event disallowed by the configured Allow[Level] option. By default, +// ErrNotAllowed is nil; in this case the log event is squelched with no +// error. +func ErrNotAllowed(err error) Option { + return func(l *logger) { l.errNotAllowed = err } +} + +// SquelchNoLevel instructs Log to squelch log events with no level, so that +// they don't proceed through to the wrapped logger. If SquelchNoLevel is set +// to true and a log event is squelched in this way, the error value +// configured with ErrNoLevel is returned to the caller. +func SquelchNoLevel(squelch bool) Option { + return func(l *logger) { l.squelchNoLevel = squelch } +} + +// ErrNoLevel sets the error to return from Log when it squelches a log event +// with no level. By default, ErrNoLevel is nil; in this case the log event is +// squelched with no error. +func ErrNoLevel(err error) Option { + return func(l *logger) { l.errNoLevel = err } +} + +// NewInjector wraps next and returns a logger that adds a Key/level pair to +// the beginning of log events that don't already contain a level. In effect, +// this gives a default level to logs without a level. +func NewInjector(next log.Logger, level Value) log.Logger { + return &injector{ + next: next, + level: level, + } +} + +type injector struct { + next log.Logger + level interface{} +} + +func (l *injector) Log(keyvals ...interface{}) error { + for i := 1; i < len(keyvals); i += 2 { + if _, ok := keyvals[i].(*levelValue); ok { + return l.next.Log(keyvals...) + } + } + kvs := make([]interface{}, len(keyvals)+2) + kvs[0], kvs[1] = key, l.level + copy(kvs[2:], keyvals) + return l.next.Log(kvs...) +} + +// Value is the interface that each of the canonical level values implement. 
+// It contains unexported methods that prevent types from other packages from +// implementing it and guaranteeing that NewFilter can distinguish the levels +// defined in this package from all other values. +type Value interface { + String() string + levelVal() +} + +// Key returns the unique key added to log events by the loggers in this +// package. +func Key() interface{} { return key } + +// ErrorValue returns the unique value added to log events by Error. +func ErrorValue() Value { return errorValue } + +// WarnValue returns the unique value added to log events by Warn. +func WarnValue() Value { return warnValue } + +// InfoValue returns the unique value added to log events by Info. +func InfoValue() Value { return infoValue } + +// DebugValue returns the unique value added to log events by Warn. +func DebugValue() Value { return debugValue } + +var ( + // key is of type interfae{} so that it allocates once during package + // initialization and avoids allocating every time the value is added to a + // []interface{} later. + key interface{} = "level" + + errorValue = &levelValue{level: levelError, name: "error"} + warnValue = &levelValue{level: levelWarn, name: "warn"} + infoValue = &levelValue{level: levelInfo, name: "info"} + debugValue = &levelValue{level: levelDebug, name: "debug"} +) + +type level byte + +const ( + levelDebug level = 1 << iota + levelInfo + levelWarn + levelError +) + +type levelValue struct { + name string + level +} + +func (v *levelValue) String() string { return v.name } +func (v *levelValue) levelVal() {} diff --git a/vendor/github.com/go-kit/kit/log/level/level_test.go b/vendor/github.com/go-kit/kit/log/level/level_test.go new file mode 100644 index 000000000..e362effc4 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/level/level_test.go @@ -0,0 +1,235 @@ +package level_test + +import ( + "bytes" + "errors" + "io" + "strings" + "testing" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" +) + +func TestVariousLevels(t *testing.T) { + testCases := []struct { + name string + allowed level.Option + want string + }{ + { + "AllowAll", + level.AllowAll(), + strings.Join([]string{ + `{"level":"debug","this is":"debug log"}`, + `{"level":"info","this is":"info log"}`, + `{"level":"warn","this is":"warn log"}`, + `{"level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowDebug", + level.AllowDebug(), + strings.Join([]string{ + `{"level":"debug","this is":"debug log"}`, + `{"level":"info","this is":"info log"}`, + `{"level":"warn","this is":"warn log"}`, + `{"level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowInfo", + level.AllowInfo(), + strings.Join([]string{ + `{"level":"info","this is":"info log"}`, + `{"level":"warn","this is":"warn log"}`, + `{"level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowWarn", + level.AllowWarn(), + strings.Join([]string{ + `{"level":"warn","this is":"warn log"}`, + `{"level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowError", + level.AllowError(), + strings.Join([]string{ + `{"level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowNone", + level.AllowNone(), + ``, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var buf bytes.Buffer + logger := level.NewFilter(log.NewJSONLogger(&buf), tc.allowed) + + level.Debug(logger).Log("this is", "debug log") + level.Info(logger).Log("this is", "info log") + level.Warn(logger).Log("this is", "warn log") + level.Error(logger).Log("this is", "error log") + + if 
want, have := tc.want, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant:\n%s\nhave:\n%s", want, have) + } + }) + } +} + +func TestErrNotAllowed(t *testing.T) { + myError := errors.New("squelched!") + opts := []level.Option{ + level.AllowWarn(), + level.ErrNotAllowed(myError), + } + logger := level.NewFilter(log.NewNopLogger(), opts...) + + if want, have := myError, level.Info(logger).Log("foo", "bar"); want != have { + t.Errorf("want %#+v, have %#+v", want, have) + } + + if want, have := error(nil), level.Warn(logger).Log("foo", "bar"); want != have { + t.Errorf("want %#+v, have %#+v", want, have) + } +} + +func TestErrNoLevel(t *testing.T) { + myError := errors.New("no level specified") + + var buf bytes.Buffer + opts := []level.Option{ + level.SquelchNoLevel(true), + level.ErrNoLevel(myError), + } + logger := level.NewFilter(log.NewJSONLogger(&buf), opts...) + + if want, have := myError, logger.Log("foo", "bar"); want != have { + t.Errorf("want %v, have %v", want, have) + } + if want, have := ``, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} + +func TestAllowNoLevel(t *testing.T) { + var buf bytes.Buffer + opts := []level.Option{ + level.SquelchNoLevel(false), + level.ErrNoLevel(errors.New("I should never be returned!")), + } + logger := level.NewFilter(log.NewJSONLogger(&buf), opts...) + + if want, have := error(nil), logger.Log("foo", "bar"); want != have { + t.Errorf("want %v, have %v", want, have) + } + if want, have := `{"foo":"bar"}`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} + +func TestLevelContext(t *testing.T) { + var buf bytes.Buffer + + // Wrapping the level logger with a context allows users to use + // log.DefaultCaller as per normal. + var logger log.Logger + logger = log.NewLogfmtLogger(&buf) + logger = level.NewFilter(logger, level.AllowAll()) + logger = log.With(logger, "caller", log.DefaultCaller) + + level.Info(logger).Log("foo", "bar") + if want, have := `level=info caller=level_test.go:149 foo=bar`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} + +func TestContextLevel(t *testing.T) { + var buf bytes.Buffer + + // Wrapping a context with the level logger still works, but requires users + // to specify a higher callstack depth value. 
+ var logger log.Logger + logger = log.NewLogfmtLogger(&buf) + logger = log.With(logger, "caller", log.Caller(5)) + logger = level.NewFilter(logger, level.AllowAll()) + + level.Info(logger).Log("foo", "bar") + if want, have := `caller=level_test.go:165 level=info foo=bar`, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} + +func TestLevelFormatting(t *testing.T) { + testCases := []struct { + name string + format func(io.Writer) log.Logger + output string + }{ + { + name: "logfmt", + format: log.NewLogfmtLogger, + output: `level=info foo=bar`, + }, + { + name: "JSON", + format: log.NewJSONLogger, + output: `{"foo":"bar","level":"info"}`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var buf bytes.Buffer + + logger := tc.format(&buf) + level.Info(logger).Log("foo", "bar") + if want, have := tc.output, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant: '%s'\nhave '%s'", want, have) + } + }) + } +} + +func TestInjector(t *testing.T) { + var ( + output []interface{} + logger log.Logger + ) + + logger = log.LoggerFunc(func(keyvals ...interface{}) error { + output = keyvals + return nil + }) + logger = level.NewInjector(logger, level.InfoValue()) + + logger.Log("foo", "bar") + if got, want := len(output), 4; got != want { + t.Errorf("missing level not injected: got len==%d, want len==%d", got, want) + } + if got, want := output[0], level.Key(); got != want { + t.Errorf("wrong level key: got %#v, want %#v", got, want) + } + if got, want := output[1], level.InfoValue(); got != want { + t.Errorf("wrong level value: got %#v, want %#v", got, want) + } + + level.Error(logger).Log("foo", "bar") + if got, want := len(output), 4; got != want { + t.Errorf("leveled record modified: got len==%d, want len==%d", got, want) + } + if got, want := output[0], level.Key(); got != want { + t.Errorf("wrong level key: got %#v, want %#v", got, want) + } + if got, want := output[1], level.ErrorValue(); got != want { + t.Errorf("wrong level value: got %#v, want %#v", got, want) + } +} diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go new file mode 100644 index 000000000..66a9e2fde --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/log.go @@ -0,0 +1,135 @@ +package log + +import "errors" + +// Logger is the fundamental interface for all log operations. Log creates a +// log event from keyvals, a variadic sequence of alternating keys and values. +// Implementations must be safe for concurrent use by multiple goroutines. In +// particular, any implementation of Logger that appends to keyvals or +// modifies or retains any of its elements must make a copy first. +type Logger interface { + Log(keyvals ...interface{}) error +} + +// ErrMissingValue is appended to keyvals slices with odd length to substitute +// the missing value. +var ErrMissingValue = errors.New("(MISSING)") + +// With returns a new contextual logger with keyvals prepended to those passed +// to calls to Log. If logger is also a contextual logger created by With or +// WithPrefix, keyvals is appended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func With(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + kvs := append(l.keyvals, keyvals...) 
+ if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + return &context{ + logger: l.logger, + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + keyvals: kvs[:len(kvs):len(kvs)], + hasValuer: l.hasValuer || containsValuer(keyvals), + } +} + +// WithPrefix returns a new contextual logger with keyvals prepended to those +// passed to calls to Log. If logger is also a contextual logger created by +// With or WithPrefix, keyvals is prepended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func WithPrefix(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + n := len(l.keyvals) + len(keyvals) + if len(keyvals)%2 != 0 { + n++ + } + kvs := make([]interface{}, 0, n) + kvs = append(kvs, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + kvs = append(kvs, l.keyvals...) + return &context{ + logger: l.logger, + keyvals: kvs, + hasValuer: l.hasValuer || containsValuer(keyvals), + } +} + +// context is the Logger implementation returned by With and WithPrefix. It +// wraps a Logger and holds keyvals that it includes in all log events. Its +// Log method calls bindValues to generate values for each Valuer in the +// context keyvals. +// +// A context must always have the same number of stack frames between calls to +// its Log method and the eventual binding of Valuers to their value. This +// requirement comes from the functional requirement to allow a context to +// resolve application call site information for a Caller stored in the +// context. To do this we must be able to predict the number of logging +// functions on the stack when bindValues is called. +// +// Two implementation details provide the needed stack depth consistency. +// +// 1. newContext avoids introducing an additional layer when asked to +// wrap another context. +// 2. With and WithPrefix avoid introducing an additional layer by +// returning a newly constructed context with a merged keyvals rather +// than simply wrapping the existing context. +type context struct { + logger Logger + keyvals []interface{} + hasValuer bool +} + +func newContext(logger Logger) *context { + if c, ok := logger.(*context); ok { + return c + } + return &context{logger: logger} +} + +// Log replaces all value elements (odd indexes) containing a Valuer in the +// stored context with their generated value, appends keyvals, and passes the +// result to the wrapped Logger. +func (l *context) Log(keyvals ...interface{}) error { + kvs := append(l.keyvals, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + if l.hasValuer { + // If no keyvals were appended above then we must copy l.keyvals so + // that future log events will reevaluate the stored Valuers. + if len(keyvals) == 0 { + kvs = append([]interface{}{}, l.keyvals...) + } + bindValues(kvs[:len(l.keyvals)]) + } + return l.logger.Log(kvs...) 
+} + +// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If +// f is a function with the appropriate signature, LoggerFunc(f) is a Logger +// object that calls f. +type LoggerFunc func(...interface{}) error + +// Log implements Logger by calling f(keyvals...). +func (f LoggerFunc) Log(keyvals ...interface{}) error { + return f(keyvals...) +} diff --git a/vendor/github.com/go-kit/kit/log/log_test.go b/vendor/github.com/go-kit/kit/log/log_test.go new file mode 100644 index 000000000..1bf29727e --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/log_test.go @@ -0,0 +1,191 @@ +package log_test + +import ( + "bytes" + "fmt" + "sync" + "testing" + + "github.com/go-kit/kit/log" + "github.com/go-stack/stack" +) + +func TestContext(t *testing.T) { + t.Parallel() + buf := &bytes.Buffer{} + logger := log.NewLogfmtLogger(buf) + + kvs := []interface{}{"a", 123} + lc := log.With(logger, kvs...) + kvs[1] = 0 // With should copy its key values + + lc = log.With(lc, "b", "c") // With should stack + if err := lc.Log("msg", "message"); err != nil { + t.Fatal(err) + } + if want, have := "a=123 b=c msg=message\n", buf.String(); want != have { + t.Errorf("\nwant: %shave: %s", want, have) + } + + buf.Reset() + lc = log.WithPrefix(lc, "p", "first") + if err := lc.Log("msg", "message"); err != nil { + t.Fatal(err) + } + if want, have := "p=first a=123 b=c msg=message\n", buf.String(); want != have { + t.Errorf("\nwant: %shave: %s", want, have) + } +} + +func TestContextMissingValue(t *testing.T) { + t.Parallel() + var output []interface{} + logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { + output = keyvals + return nil + })) + + log.WithPrefix(log.With(logger, "k1"), "k0").Log("k2") + if want, have := 6, len(output); want != have { + t.Errorf("want len(output) == %v, have %v", want, have) + } + for i := 1; i < 6; i += 2 { + if want, have := log.ErrMissingValue, output[i]; want != have { + t.Errorf("want output[%d] == %#v, have %#v", i, want, have) + } + } +} + +// Test that context.Log has a consistent function stack depth when binding +// Valuers, regardless of how many times With has been called. +func TestContextStackDepth(t *testing.T) { + t.Parallel() + fn := fmt.Sprintf("%n", stack.Caller(0)) + + var output []interface{} + + logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { + output = keyvals + return nil + })) + + stackValuer := log.Valuer(func() interface{} { + for i, c := range stack.Trace() { + if fmt.Sprintf("%n", c) == fn { + return i + } + } + t.Fatal("Test function not found in stack trace.") + return nil + }) + + logger = log.With(logger, "stack", stackValuer) + + // Call through interface to get baseline. + logger.Log("k", "v") + want := output[1].(int) + + for len(output) < 10 { + logger.Log("k", "v") + if have := output[1]; have != want { + t.Errorf("%d Withs: have %v, want %v", len(output)/2-1, have, want) + } + + wrapped := log.With(logger) + wrapped.Log("k", "v") + if have := output[1]; have != want { + t.Errorf("%d Withs: have %v, want %v", len(output)/2-1, have, want) + } + + logger = log.With(logger, "k", "v") + } +} + +// Test that With returns a Logger safe for concurrent use. This test +// validates that the stored logging context does not get corrupted when +// multiple clients concurrently log additional keyvals. +// +// This test must be run with go test -cpu 2 (or more) to achieve its goal. +func TestWithConcurrent(t *testing.T) { + // Create some buckets to count how many events each goroutine logs. 
+ const goroutines = 8 + counts := [goroutines]int{} + + // This logger extracts a goroutine id from the last value field and + // increments the referenced bucket. + logger := log.LoggerFunc(func(kv ...interface{}) error { + goroutine := kv[len(kv)-1].(int) + counts[goroutine]++ + return nil + }) + + // With must be careful about handling slices that can grow without + // copying the underlying array, so give it a challenge. + l := log.With(logger, make([]interface{}, 0, 2)...) + + // Start logging concurrently. Each goroutine logs its id so the logger + // can bucket the event counts. + var wg sync.WaitGroup + wg.Add(goroutines) + const n = 10000 + for i := 0; i < goroutines; i++ { + go func(idx int) { + defer wg.Done() + for j := 0; j < n; j++ { + l.Log("goroutineIdx", idx) + } + }(i) + } + wg.Wait() + + for bucket, have := range counts { + if want := n; want != have { + t.Errorf("bucket %d: want %d, have %d", bucket, want, have) // note Errorf + } + } +} + +func BenchmarkDiscard(b *testing.B) { + logger := log.NewNopLogger() + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + logger.Log("k", "v") + } +} + +func BenchmarkOneWith(b *testing.B) { + logger := log.NewNopLogger() + lc := log.With(logger, "k", "v") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + lc.Log("k", "v") + } +} + +func BenchmarkTwoWith(b *testing.B) { + logger := log.NewNopLogger() + lc := log.With(logger, "k", "v") + for i := 1; i < 2; i++ { + lc = log.With(lc, "k", "v") + } + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + lc.Log("k", "v") + } +} + +func BenchmarkTenWith(b *testing.B) { + logger := log.NewNopLogger() + lc := log.With(logger, "k", "v") + for i := 1; i < 10; i++ { + lc = log.With(lc, "k", "v") + } + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + lc.Log("k", "v") + } +} diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go new file mode 100644 index 000000000..a00305298 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/logfmt_logger.go @@ -0,0 +1,62 @@ +package log + +import ( + "bytes" + "io" + "sync" + + "github.com/go-logfmt/logfmt" +) + +type logfmtEncoder struct { + *logfmt.Encoder + buf bytes.Buffer +} + +func (l *logfmtEncoder) Reset() { + l.Encoder.Reset() + l.buf.Reset() +} + +var logfmtEncoderPool = sync.Pool{ + New: func() interface{} { + var enc logfmtEncoder + enc.Encoder = logfmt.NewEncoder(&enc.buf) + return &enc + }, +} + +type logfmtLogger struct { + w io.Writer +} + +// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in +// logfmt format. Each log event produces no more than one call to w.Write. +// The passed Writer must be safe for concurrent use by multiple goroutines if +// the returned Logger will be used concurrently. +func NewLogfmtLogger(w io.Writer) Logger { + return &logfmtLogger{w} +} + +func (l logfmtLogger) Log(keyvals ...interface{}) error { + enc := logfmtEncoderPool.Get().(*logfmtEncoder) + enc.Reset() + defer logfmtEncoderPool.Put(enc) + + if err := enc.EncodeKeyvals(keyvals...); err != nil { + return err + } + + // Add newline to the end of the buffer + if err := enc.EndRecord(); err != nil { + return err + } + + // The Logger interface requires implementations to be safe for concurrent + // use by multiple goroutines. For this implementation that means making + // only one call to l.w.Write() for each call to Log. 
+ if _, err := l.w.Write(enc.buf.Bytes()); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger_test.go b/vendor/github.com/go-kit/kit/log/logfmt_logger_test.go new file mode 100644 index 000000000..91bbca15c --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/logfmt_logger_test.go @@ -0,0 +1,57 @@ +package log_test + +import ( + "bytes" + "errors" + "io/ioutil" + "testing" + + "github.com/go-kit/kit/log" + "github.com/go-logfmt/logfmt" +) + +func TestLogfmtLogger(t *testing.T) { + t.Parallel() + buf := &bytes.Buffer{} + logger := log.NewLogfmtLogger(buf) + + if err := logger.Log("hello", "world"); err != nil { + t.Fatal(err) + } + if want, have := "hello=world\n", buf.String(); want != have { + t.Errorf("want %#v, have %#v", want, have) + } + + buf.Reset() + if err := logger.Log("a", 1, "err", errors.New("error")); err != nil { + t.Fatal(err) + } + if want, have := "a=1 err=error\n", buf.String(); want != have { + t.Errorf("want %#v, have %#v", want, have) + } + + buf.Reset() + if err := logger.Log("std_map", map[int]int{1: 2}, "my_map", mymap{0: 0}); err != nil { + t.Fatal(err) + } + if want, have := "std_map=\""+logfmt.ErrUnsupportedValueType.Error()+"\" my_map=special_behavior\n", buf.String(); want != have { + t.Errorf("want %#v, have %#v", want, have) + } +} + +func BenchmarkLogfmtLoggerSimple(b *testing.B) { + benchmarkRunner(b, log.NewLogfmtLogger(ioutil.Discard), baseMessage) +} + +func BenchmarkLogfmtLoggerContextual(b *testing.B) { + benchmarkRunner(b, log.NewLogfmtLogger(ioutil.Discard), withMessage) +} + +func TestLogfmtLoggerConcurrency(t *testing.T) { + t.Parallel() + testConcurrency(t, log.NewLogfmtLogger(ioutil.Discard), 10000) +} + +type mymap map[int]int + +func (m mymap) String() string { return "special_behavior" } diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go new file mode 100644 index 000000000..1047d626c --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/nop_logger.go @@ -0,0 +1,8 @@ +package log + +type nopLogger struct{} + +// NewNopLogger returns a logger that doesn't do anything. +func NewNopLogger() Logger { return nopLogger{} } + +func (nopLogger) Log(...interface{}) error { return nil } diff --git a/vendor/github.com/go-kit/kit/log/nop_logger_test.go b/vendor/github.com/go-kit/kit/log/nop_logger_test.go new file mode 100644 index 000000000..908ddd816 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/nop_logger_test.go @@ -0,0 +1,26 @@ +package log_test + +import ( + "testing" + + "github.com/go-kit/kit/log" +) + +func TestNopLogger(t *testing.T) { + t.Parallel() + logger := log.NewNopLogger() + if err := logger.Log("abc", 123); err != nil { + t.Error(err) + } + if err := log.With(logger, "def", "ghi").Log(); err != nil { + t.Error(err) + } +} + +func BenchmarkNopLoggerSimple(b *testing.B) { + benchmarkRunner(b, log.NewNopLogger(), baseMessage) +} + +func BenchmarkNopLoggerContextual(b *testing.B) { + benchmarkRunner(b, log.NewNopLogger(), withMessage) +} diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go new file mode 100644 index 000000000..ff96b5dee --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/stdlib.go @@ -0,0 +1,116 @@ +package log + +import ( + "io" + "log" + "regexp" + "strings" +) + +// StdlibWriter implements io.Writer by invoking the stdlib log.Print. 
It's +// designed to be passed to a Go kit logger as the writer, for cases where +// it's necessary to redirect all Go kit log output to the stdlib logger. +// +// If you have any choice in the matter, you shouldn't use this. Prefer to +// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. +type StdlibWriter struct{} + +// Write implements io.Writer. +func (w StdlibWriter) Write(p []byte) (int, error) { + log.Print(strings.TrimSpace(string(p))) + return len(p), nil +} + +// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib +// logger's SetOutput. It will extract date/timestamps, filenames, and +// messages, and place them under relevant keys. +type StdlibAdapter struct { + Logger + timestampKey string + fileKey string + messageKey string +} + +// StdlibAdapterOption sets a parameter for the StdlibAdapter. +type StdlibAdapterOption func(*StdlibAdapter) + +// TimestampKey sets the key for the timestamp field. By default, it's "ts". +func TimestampKey(key string) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.timestampKey = key } +} + +// FileKey sets the key for the file and line field. By default, it's "caller". +func FileKey(key string) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.fileKey = key } +} + +// MessageKey sets the key for the actual log message. By default, it's "msg". +func MessageKey(key string) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.messageKey = key } +} + +// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed +// logger. It's designed to be passed to log.SetOutput. +func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer { + a := StdlibAdapter{ + Logger: logger, + timestampKey: "ts", + fileKey: "caller", + messageKey: "msg", + } + for _, option := range options { + option(&a) + } + return a +} + +func (a StdlibAdapter) Write(p []byte) (int, error) { + result := subexps(p) + keyvals := []interface{}{} + var timestamp string + if date, ok := result["date"]; ok && date != "" { + timestamp = date + } + if time, ok := result["time"]; ok && time != "" { + if timestamp != "" { + timestamp += " " + } + timestamp += time + } + if timestamp != "" { + keyvals = append(keyvals, a.timestampKey, timestamp) + } + if file, ok := result["file"]; ok && file != "" { + keyvals = append(keyvals, a.fileKey, file) + } + if msg, ok := result["msg"]; ok { + keyvals = append(keyvals, a.messageKey, msg) + } + if err := a.Logger.Log(keyvals...); err != nil { + return 0, err + } + return len(p), nil +} + +const ( + logRegexpDate = `(?P[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?` + logRegexpTime = `(?P + + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html new file mode 100644 index 000000000..26e5d8b4e --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html @@ -0,0 +1,48 @@ + + + + HTTP vs UTF-8 BOM + + + + + + + + + + + +

HTTP vs UTF-8 BOM

+ + +
+ + +
 
+ + + + + +
+

A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.

+

The HTTP header attempts to set the character encoding to ISO 8859-15. The page starts with a UTF-8 signature.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

If the test is unsuccessful, the characters  should appear at the top of the page. These represent the bytes that make up the UTF-8 signature when encountered in the ISO 8859-15 encoding.

+
+
+
HTML5
+

the-input-byte-stream-034
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html new file mode 100644 index 000000000..2f07e9515 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html @@ -0,0 +1,49 @@ + + + + HTTP vs meta charset + + + + + + + + + + + +

HTTP vs meta charset

+ + +
+ + +
 
+ + + + + +
+

The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.

+

The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-1.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

+
+
+
HTML5
+

the-input-byte-stream-018
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html new file mode 100644 index 000000000..6853cddec --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html @@ -0,0 +1,49 @@ + + + + HTTP vs meta content + + + + + + + + + + + +

HTTP vs meta content

+ + +
+ + +
 
+ + + + + +
+

The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.

+

The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-1.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

+
+
+
HTML5
+

the-input-byte-stream-016
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html new file mode 100644 index 000000000..612e26c6c --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html @@ -0,0 +1,47 @@ + + + + No encoding declaration + + + + + + + + + + + +

No encoding declaration

+ + +
+ + +
 
+ + + + + +
+

A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.

+

The test on this page contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

+
+
+
HTML5
+

the-input-byte-stream-015
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/README b/vendor/golang.org/x/net/html/charset/testdata/README new file mode 100644 index 000000000..38ef0f9f1 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/README @@ -0,0 +1,9 @@ +These test cases come from +http://www.w3.org/International/tests/repository/html5/the-input-byte-stream/results-basics + +Distributed under both the W3C Test Suite License +(http://www.w3.org/Consortium/Legal/2008/04-testsuite-license) +and the W3C 3-clause BSD License +(http://www.w3.org/Consortium/Legal/2008/03-bsd-license). +To contribute to a W3C Test Suite, see the policies and contribution +forms (http://www.w3.org/2004/10/27-testcases). diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html new file mode 100644 index 000000000..3abf7a934 Binary files /dev/null and b/vendor/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html differ diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html new file mode 100644 index 000000000..76254c980 Binary files /dev/null and b/vendor/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html differ diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html new file mode 100644 index 000000000..83de43338 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html @@ -0,0 +1,49 @@ + + + + UTF-8 BOM vs meta charset + + + + + + + + + + + +

UTF-8 BOM vs meta charset

+ + +
+ + +
 
+ + + + + +
+

A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.

+

The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

+
+
+
HTML5
+

the-input-byte-stream-038
Result summary & related tests
Detailed results for this test
Link to spec

+
Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • +
  • The test is read from a server that supports HTTP.
+
+ + + + + + diff --git a/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html new file mode 100644 index 000000000..501aac2d6 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html @@ -0,0 +1,48 @@ + + + + UTF-8 BOM vs meta content + + + + + + + + + + + +

UTF-8 BOM vs meta content


A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.


The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ýäè. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.

the-input-byte-stream-037

Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html
new file mode 100644
index 000000000..2d7d25aba
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html
@@ -0,0 +1,48 @@

meta charset attribute


The character encoding of the page can be set by a meta element with charset attribute.


The only character encoding declaration for this HTML file is in the charset attribute of the meta element, which declares the encoding to be ISO 8859-15.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÃœÃ€Ãš. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

the-input-byte-stream-009

Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
diff --git a/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html
new file mode 100644
index 000000000..1c3f228e7
--- /dev/null
+++ b/vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html
@@ -0,0 +1,48 @@

meta content attribute


The character encoding of the page can be set by a meta element with http-equiv and content attributes.


The only character encoding declaration for this HTML file is in the content attribute of the meta element, which declares the encoding to be ISO 8859-15.

The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÃœÃ€Ãš. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.

the-input-byte-stream-007

Assumptions:
  • The default encoding for the browser you are testing is not set to ISO 8859-15.
  • The test is read from a server that supports HTTP.
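The meta charset and meta content fixtures cover the case where the meta element is the only encoding declaration. A minimal sketch of consuming such a page through golang.org/x/net/html/charset, which sniffs the declaration and transcodes the stream to UTF-8; the ISO 8859-15 bytes below are an illustrative stand-in for the fixture content, not taken from it:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"strings"

    	"golang.org/x/net/html/charset"
    )

    func main() {
    	// 0xFD 0xE4 0xE8 is "ýäè" in ISO 8859-15; the meta element is the only encoding hint.
    	raw := "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=ISO-8859-15\">" +
    		"<div class=\"\xFD\xE4\xE8\">test</div>"

    	// NewReader sniffs the declaration and returns a reader that yields UTF-8.
    	r, err := charset.NewReader(strings.NewReader(raw), "text/html")
    	if err != nil {
    		panic(err)
    	}
    	out, _ := ioutil.ReadAll(r)
    	fmt.Printf("%s\n", out) // the class attribute is now the UTF-8 string ýäè
    }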
+ + + + + + diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go new file mode 100644 index 000000000..5eb7c5a8f --- /dev/null +++ b/vendor/golang.org/x/net/html/const.go @@ -0,0 +1,104 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// Section 12.2.4.2 of the HTML5 specification says "The following elements +// have varying levels of special parsing rules". +// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements +var isSpecialElementMap = map[string]bool{ + "address": true, + "applet": true, + "area": true, + "article": true, + "aside": true, + "base": true, + "basefont": true, + "bgsound": true, + "blockquote": true, + "body": true, + "br": true, + "button": true, + "caption": true, + "center": true, + "col": true, + "colgroup": true, + "dd": true, + "details": true, + "dir": true, + "div": true, + "dl": true, + "dt": true, + "embed": true, + "fieldset": true, + "figcaption": true, + "figure": true, + "footer": true, + "form": true, + "frame": true, + "frameset": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "header": true, + "hgroup": true, + "hr": true, + "html": true, + "iframe": true, + "img": true, + "input": true, + "isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility. + "keygen": true, + "li": true, + "link": true, + "listing": true, + "main": true, + "marquee": true, + "menu": true, + "meta": true, + "nav": true, + "noembed": true, + "noframes": true, + "noscript": true, + "object": true, + "ol": true, + "p": true, + "param": true, + "plaintext": true, + "pre": true, + "script": true, + "section": true, + "select": true, + "source": true, + "style": true, + "summary": true, + "table": true, + "tbody": true, + "td": true, + "template": true, + "textarea": true, + "tfoot": true, + "th": true, + "thead": true, + "title": true, + "tr": true, + "track": true, + "ul": true, + "wbr": true, + "xmp": true, +} + +func isSpecialElement(element *Node) bool { + switch element.Namespace { + case "", "html": + return isSpecialElementMap[element.Data] + case "svg": + return element.Data == "foreignObject" + } + return false +} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go new file mode 100644 index 000000000..822ed42a0 --- /dev/null +++ b/vendor/golang.org/x/net/html/doc.go @@ -0,0 +1,106 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package html implements an HTML5-compliant tokenizer and parser. + +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the +caller's responsibility to ensure that r provides UTF-8 encoded HTML. + + z := html.NewTokenizer(r) + +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), +which parses the next token and returns its type, or an error: + + for { + tt := z.Next() + if tt == html.ErrorToken { + // ... + return ... + } + // Process the current token. + } + +There are two APIs for retrieving the current token. The high-level API is to +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs +allow optionally calling Raw after Next but before Token, Text, TagName, or +TagAttr. 
In EBNF notation, the valid call sequence per token is: + + Next {Raw} [ Token | Text | TagName {TagAttr} ] + +Token returns an independent data structure that completely describes a token. +Entities (such as "<") are unescaped, tag names and attribute keys are +lower-cased, and attributes are collected into a []Attribute. For example: + + for { + if z.Next() == html.ErrorToken { + // Returning io.EOF indicates success. + return z.Err() + } + emitToken(z.Token()) + } + +The low-level API performs fewer allocations and copies, but the contents of +the []byte values returned by Text, TagName and TagAttr may change on the next +call to Next. For example, to extract an HTML page's anchor text: + + depth := 0 + for { + tt := z.Next() + switch tt { + case html.ErrorToken: + return z.Err() + case html.TextToken: + if depth > 0 { + // emitBytes should copy the []byte it receives, + // if it doesn't process it immediately. + emitBytes(z.Text()) + } + case html.StartTagToken, html.EndTagToken: + tn, _ := z.TagName() + if len(tn) == 1 && tn[0] == 'a' { + if tt == html.StartTagToken { + depth++ + } else { + depth-- + } + } + } + } + +Parsing is done by calling Parse with an io.Reader, which returns the root of +the parse tree (the document element) as a *Node. It is the caller's +responsibility to ensure that the Reader provides UTF-8 encoded HTML. For +example, to process each anchor node in depth-first order: + + doc, err := html.Parse(r) + if err != nil { + // ... + } + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + // Do something with n... + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(doc) + +The relevant specifications include: +https://html.spec.whatwg.org/multipage/syntax.html and +https://html.spec.whatwg.org/multipage/syntax.html#tokenization +*/ +package html // import "golang.org/x/net/html" + +// The tokenization algorithm implemented by this package is not a line-by-line +// transliteration of the relatively verbose state-machine in the WHATWG +// specification. A more direct approach is used instead, where the program +// counter implies the state, such as whether it is tokenizing a tag or a text +// node. Specification compliance is verified by checking expected and actual +// outputs over a test suite rather than aiming for algorithmic fidelity. + +// TODO(nigeltao): Does a DOM API belong in this package or a separate one? +// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go new file mode 100644 index 000000000..c484e5a94 --- /dev/null +++ b/vendor/golang.org/x/net/html/doctype.go @@ -0,0 +1,156 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "strings" +) + +// parseDoctype parses the data from a DoctypeToken into a name, +// public identifier, and system identifier. It returns a Node whose Type +// is DoctypeNode, whose Data is the name, and which has attributes +// named "system" and "public" for the two identifiers if they were present. +// quirks is whether the document should be parsed in "quirks mode". +func parseDoctype(s string) (n *Node, quirks bool) { + n = &Node{Type: DoctypeNode} + + // Find the name. 
+ space := strings.IndexAny(s, whitespace) + if space == -1 { + space = len(s) + } + n.Data = s[:space] + // The comparison to "html" is case-sensitive. + if n.Data != "html" { + quirks = true + } + n.Data = strings.ToLower(n.Data) + s = strings.TrimLeft(s[space:], whitespace) + + if len(s) < 6 { + // It can't start with "PUBLIC" or "SYSTEM". + // Ignore the rest of the string. + return n, quirks || s != "" + } + + key := strings.ToLower(s[:6]) + s = s[6:] + for key == "public" || key == "system" { + s = strings.TrimLeft(s, whitespace) + if s == "" { + break + } + quote := s[0] + if quote != '"' && quote != '\'' { + break + } + s = s[1:] + q := strings.IndexRune(s, rune(quote)) + var id string + if q == -1 { + id = s + s = "" + } else { + id = s[:q] + s = s[q+1:] + } + n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) + if key == "public" { + key = "system" + } else { + key = "" + } + } + + if key != "" || s != "" { + quirks = true + } else if len(n.Attr) > 0 { + if n.Attr[0].Key == "public" { + public := strings.ToLower(n.Attr[0].Val) + switch public { + case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": + quirks = true + default: + for _, q := range quirkyIDs { + if strings.HasPrefix(public, q) { + quirks = true + break + } + } + } + // The following two public IDs only cause quirks mode if there is no system ID. + if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || + strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { + quirks = true + } + } + if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && + strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + quirks = true + } + } + + return n, quirks +} + +// quirkyIDs is a list of public doctype identifiers that cause a document +// to be interpreted in quirks mode. The identifiers should be in lower case. +var quirkyIDs = []string{ + "+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. 
corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//", +} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go new file mode 100644 index 000000000..a50c04c60 --- /dev/null +++ b/vendor/golang.org/x/net/html/entity.go @@ -0,0 +1,2253 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// All entities that do not end with ';' are 6 or fewer bytes long. +const longestEntityWithoutSemicolon = 6 + +// entity is a map from HTML entity names to their values. The semicolon matters: +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references +// lists both "amp" and "amp;" as two separate entries. +// +// Note that the HTML5 list is larger than the HTML4 list at +// http://www.w3.org/TR/html4/sgml/entities.html +var entity = map[string]rune{ + "AElig;": '\U000000C6', + "AMP;": '\U00000026', + "Aacute;": '\U000000C1', + "Abreve;": '\U00000102', + "Acirc;": '\U000000C2', + "Acy;": '\U00000410', + "Afr;": '\U0001D504', + "Agrave;": '\U000000C0', + "Alpha;": '\U00000391', + "Amacr;": '\U00000100', + "And;": '\U00002A53', + "Aogon;": '\U00000104', + "Aopf;": '\U0001D538', + "ApplyFunction;": '\U00002061', + "Aring;": '\U000000C5', + "Ascr;": '\U0001D49C', + "Assign;": '\U00002254', + "Atilde;": '\U000000C3', + "Auml;": '\U000000C4', + "Backslash;": '\U00002216', + "Barv;": '\U00002AE7', + "Barwed;": '\U00002306', + "Bcy;": '\U00000411', + "Because;": '\U00002235', + "Bernoullis;": '\U0000212C', + "Beta;": '\U00000392', + "Bfr;": '\U0001D505', + "Bopf;": '\U0001D539', + "Breve;": '\U000002D8', + "Bscr;": '\U0000212C', + "Bumpeq;": '\U0000224E', + "CHcy;": '\U00000427', + "COPY;": '\U000000A9', + "Cacute;": '\U00000106', + "Cap;": '\U000022D2', + "CapitalDifferentialD;": '\U00002145', + "Cayleys;": '\U0000212D', + "Ccaron;": '\U0000010C', + "Ccedil;": '\U000000C7', + "Ccirc;": '\U00000108', + "Cconint;": '\U00002230', + "Cdot;": '\U0000010A', + "Cedilla;": '\U000000B8', + "CenterDot;": '\U000000B7', + "Cfr;": '\U0000212D', + "Chi;": '\U000003A7', + "CircleDot;": '\U00002299', + "CircleMinus;": '\U00002296', + "CirclePlus;": '\U00002295', + "CircleTimes;": '\U00002297', + "ClockwiseContourIntegral;": '\U00002232', + "CloseCurlyDoubleQuote;": '\U0000201D', + "CloseCurlyQuote;": '\U00002019', + "Colon;": '\U00002237', + "Colone;": '\U00002A74', + "Congruent;": '\U00002261', + "Conint;": '\U0000222F', + "ContourIntegral;": '\U0000222E', + "Copf;": '\U00002102', + 
"Coproduct;": '\U00002210', + "CounterClockwiseContourIntegral;": '\U00002233', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + "DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + "DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + "DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": '\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + "Hcirc;": '\U00000124', 
+ "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": '\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": '\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', + "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + "Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + "NegativeMediumSpace;": 
'\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": '\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": '\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + "Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": '\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', + "RightDownVector;": 
'\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": '\U000022D0', + "Subset;": '\U000022D0', + "SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": '\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": '\U000022AB', + "Vbar;": '\U00002AEB', + "Vcy;": 
'\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', + "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + "angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": '\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + "bigstar;": '\U00002605', 
+ "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": '\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": '\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": '\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', + "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": '\U00002201', + "compfn;": 
'\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": '\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + "ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + "diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', + "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + "empty;": '\U00002205', + 
"emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": '\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": '\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": '\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": '\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": '\U00002194', + 
"harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": '\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + "isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + "isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": '\U0000294B', + 
"ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + "leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": '\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": '\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + "lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": '\U00002A2A', + 
"mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + "natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": '\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + "nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": '\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + "oast;": 
'\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', + "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + "perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + "piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + "puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": '\U000027E9', 
+ "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + "rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": '\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": '\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', + "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + "sigmav;": 
'\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": '\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": '\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + "succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + "times;": 
'\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + "triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + "uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + "upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": '\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', + 
"xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": '\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + "LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + "REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": '\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": '\U000000FB', 
+ "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', +} + +// HTML entities that are two unicode codepoints. +var entity2 = map[string][2]rune{ + // TODO(nigeltao): Handle replacements that are wider than their names. + // "nLt;": {'\u226A', '\u20D2'}, + // "nGt;": {'\u226B', '\u20D2'}, + "NotEqualTilde;": {'\u2242', '\u0338'}, + "NotGreaterFullEqual;": {'\u2267', '\u0338'}, + "NotGreaterGreater;": {'\u226B', '\u0338'}, + "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, + "NotHumpDownHump;": {'\u224E', '\u0338'}, + "NotHumpEqual;": {'\u224F', '\u0338'}, + "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, + "NotLessLess;": {'\u226A', '\u0338'}, + "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, + "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, + "NotNestedLessLess;": {'\u2AA1', '\u0338'}, + "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, + "NotRightTriangleBar;": {'\u29D0', '\u0338'}, + "NotSquareSubset;": {'\u228F', '\u0338'}, + "NotSquareSuperset;": {'\u2290', '\u0338'}, + "NotSubset;": {'\u2282', '\u20D2'}, + "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, + "NotSucceedsTilde;": {'\u227F', '\u0338'}, + "NotSuperset;": {'\u2283', '\u20D2'}, + "ThickSpace;": {'\u205F', '\u200A'}, + "acE;": {'\u223E', '\u0333'}, + "bne;": {'\u003D', '\u20E5'}, + "bnequiv;": {'\u2261', '\u20E5'}, + "caps;": {'\u2229', '\uFE00'}, + "cups;": {'\u222A', '\uFE00'}, + "fjlig;": {'\u0066', '\u006A'}, + "gesl;": {'\u22DB', '\uFE00'}, + "gvertneqq;": {'\u2269', '\uFE00'}, + "gvnE;": {'\u2269', '\uFE00'}, + "lates;": {'\u2AAD', '\uFE00'}, + "lesg;": {'\u22DA', '\uFE00'}, + "lvertneqq;": {'\u2268', '\uFE00'}, + "lvnE;": {'\u2268', '\uFE00'}, + "nGg;": {'\u22D9', '\u0338'}, + "nGtv;": {'\u226B', '\u0338'}, + "nLl;": {'\u22D8', '\u0338'}, + "nLtv;": {'\u226A', '\u0338'}, + "nang;": {'\u2220', '\u20D2'}, + "napE;": {'\u2A70', '\u0338'}, + "napid;": {'\u224B', '\u0338'}, + "nbump;": {'\u224E', '\u0338'}, + "nbumpe;": {'\u224F', '\u0338'}, + "ncongdot;": {'\u2A6D', '\u0338'}, + "nedot;": {'\u2250', '\u0338'}, + "nesim;": {'\u2242', '\u0338'}, + "ngE;": {'\u2267', '\u0338'}, + "ngeqq;": {'\u2267', '\u0338'}, + "ngeqslant;": {'\u2A7E', '\u0338'}, + "nges;": {'\u2A7E', '\u0338'}, + "nlE;": {'\u2266', '\u0338'}, + "nleqq;": {'\u2266', '\u0338'}, + "nleqslant;": {'\u2A7D', '\u0338'}, + "nles;": {'\u2A7D', '\u0338'}, + "notinE;": {'\u22F9', '\u0338'}, + "notindot;": {'\u22F5', '\u0338'}, + "nparsl;": {'\u2AFD', '\u20E5'}, + "npart;": {'\u2202', '\u0338'}, + "npre;": {'\u2AAF', '\u0338'}, + "npreceq;": {'\u2AAF', '\u0338'}, + "nrarrc;": {'\u2933', '\u0338'}, + "nrarrw;": {'\u219D', '\u0338'}, + "nsce;": {'\u2AB0', '\u0338'}, + "nsubE;": {'\u2AC5', '\u0338'}, + "nsubset;": {'\u2282', '\u20D2'}, + "nsubseteqq;": {'\u2AC5', '\u0338'}, + "nsucceq;": {'\u2AB0', '\u0338'}, + "nsupE;": {'\u2AC6', '\u0338'}, + "nsupset;": {'\u2283', '\u20D2'}, + "nsupseteqq;": {'\u2AC6', '\u0338'}, + "nvap;": {'\u224D', '\u20D2'}, + "nvge;": {'\u2265', '\u20D2'}, + "nvgt;": {'\u003E', '\u20D2'}, + "nvle;": {'\u2264', '\u20D2'}, + "nvlt;": {'\u003C', '\u20D2'}, + "nvltrie;": {'\u22B4', '\u20D2'}, + "nvrtrie;": {'\u22B5', '\u20D2'}, + "nvsim;": {'\u223C', '\u20D2'}, + "race;": {'\u223D', '\u0331'}, + "smtes;": {'\u2AAC', '\uFE00'}, + "sqcaps;": {'\u2293', '\uFE00'}, + "sqcups;": {'\u2294', '\uFE00'}, + "varsubsetneq;": {'\u228A', '\uFE00'}, + "varsubsetneqq;": {'\u2ACB', '\uFE00'}, + "varsupsetneq;": {'\u228B', '\uFE00'}, + "varsupsetneqq;": {'\u2ACC', '\uFE00'}, + "vnsub;": 
{'\u2282', '\u20D2'}, + "vnsup;": {'\u2283', '\u20D2'}, + "vsubnE;": {'\u2ACB', '\uFE00'}, + "vsubne;": {'\u228A', '\uFE00'}, + "vsupnE;": {'\u2ACC', '\uFE00'}, + "vsupne;": {'\u228B', '\uFE00'}, +} diff --git a/vendor/golang.org/x/net/html/entity_test.go b/vendor/golang.org/x/net/html/entity_test.go new file mode 100644 index 000000000..b53f866fa --- /dev/null +++ b/vendor/golang.org/x/net/html/entity_test.go @@ -0,0 +1,29 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "testing" + "unicode/utf8" +) + +func TestEntityLength(t *testing.T) { + // We verify that the length of UTF-8 encoding of each value is <= 1 + len(key). + // The +1 comes from the leading "&". This property implies that the length of + // unescaped text is <= the length of escaped text. + for k, v := range entity { + if 1+len(k) < utf8.RuneLen(v) { + t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v)) + } + if len(k) > longestEntityWithoutSemicolon && k[len(k)-1] != ';' { + t.Errorf("entity name %s is %d characters, but longestEntityWithoutSemicolon=%d", k, len(k), longestEntityWithoutSemicolon) + } + } + for k, v := range entity2 { + if 1+len(k) < utf8.RuneLen(v[0])+utf8.RuneLen(v[1]) { + t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v[0]) + string(v[1])) + } + } +} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go new file mode 100644 index 000000000..d85613962 --- /dev/null +++ b/vendor/golang.org/x/net/html/escape.go @@ -0,0 +1,258 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// These replacements permit compatibility with old numeric entities that +// assumed Windows-1252 encoding. +// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference +var replacementTable = [...]rune{ + '\u20AC', // First entry is what 0x80 should be replaced with. + '\u0081', + '\u201A', + '\u0192', + '\u201E', + '\u2026', + '\u2020', + '\u2021', + '\u02C6', + '\u2030', + '\u0160', + '\u2039', + '\u0152', + '\u008D', + '\u017D', + '\u008F', + '\u0090', + '\u2018', + '\u2019', + '\u201C', + '\u201D', + '\u2022', + '\u2013', + '\u2014', + '\u02DC', + '\u2122', + '\u0161', + '\u203A', + '\u0153', + '\u009D', + '\u017E', + '\u0178', // Last entry is 0x9F. + // 0x00->'\uFFFD' is handled programmatically. + // 0x0D->'\u000D' is a no-op. +} + +// unescapeEntity reads an entity like "<" from b[src:] and writes the +// corresponding "<" to b[dst:], returning the incremented dst and src cursors. +// Precondition: b[src] == '&' && dst <= src. +// attribute should be true if parsing an attribute value. +func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { + // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference + + // i starts at 1 because we already know that s[0] == '&'. + i, s := 1, b[src:] + + if len(s) <= 1 { + b[dst] = b[src] + return dst + 1, src + 1 + } + + if s[i] == '#' { + if len(s) <= 3 { // We need to have at least "&#.". 
+ b[dst] = b[src] + return dst + 1, src + 1 + } + i++ + c := s[i] + hex := false + if c == 'x' || c == 'X' { + hex = true + i++ + } + + x := '\x00' + for i < len(s) { + c = s[i] + i++ + if hex { + if '0' <= c && c <= '9' { + x = 16*x + rune(c) - '0' + continue + } else if 'a' <= c && c <= 'f' { + x = 16*x + rune(c) - 'a' + 10 + continue + } else if 'A' <= c && c <= 'F' { + x = 16*x + rune(c) - 'A' + 10 + continue + } + } else if '0' <= c && c <= '9' { + x = 10*x + rune(c) - '0' + continue + } + if c != ';' { + i-- + } + break + } + + if i <= 3 { // No characters matched. + b[dst] = b[src] + return dst + 1, src + 1 + } + + if 0x80 <= x && x <= 0x9F { + // Replace characters from Windows-1252 with UTF-8 equivalents. + x = replacementTable[x-0x80] + } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { + // Replace invalid characters with the replacement character. + x = '\uFFFD' + } + + return dst + utf8.EncodeRune(b[dst:], x), src + i + } + + // Consume the maximum number of characters possible, with the + // consumed characters matching one of the named references. + + for i < len(s) { + c := s[i] + i++ + // Lower-cased characters are more common in entities, so we check for them first. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + continue + } + if c != ';' { + i-- + } + break + } + + entityName := string(s[1:i]) + if entityName == "" { + // No-op. + } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { + // No-op. + } else if x := entity[entityName]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + i + } else if x := entity2[entityName]; x[0] != 0 { + dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) + return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i + } else if !attribute { + maxLen := len(entityName) - 1 + if maxLen > longestEntityWithoutSemicolon { + maxLen = longestEntityWithoutSemicolon + } + for j := maxLen; j > 1; j-- { + if x := entity[entityName[:j]]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 + } + } + } + + dst1, src1 = dst+i, src+i + copy(b[dst:dst1], b[src:src1]) + return dst1, src1 +} + +// unescape unescapes b's entities in-place, so that "a<b" becomes "a': + esc = ">" + case '"': + // """ is shorter than """. + esc = """ + case '\r': + esc = " " + default: + panic("unrecognized escape character") + } + s = s[i+1:] + if _, err := w.WriteString(esc); err != nil { + return err + } + i = strings.IndexAny(s, escapedChars) + } + _, err := w.WriteString(s) + return err +} + +// EscapeString escapes special characters like "<" to become "<". It +// escapes only five such characters: <, >, &, ' and ". +// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't +// always true. +func EscapeString(s string) string { + if strings.IndexAny(s, escapedChars) == -1 { + return s + } + var buf bytes.Buffer + escape(&buf, s) + return buf.String() +} + +// UnescapeString unescapes entities like "<" to become "<". It unescapes a +// larger range of entities than EscapeString escapes. For example, "á" +// unescapes to "á", as does "á" and "&xE1;". +// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't +// always true. 
+func UnescapeString(s string) string { + for _, c := range s { + if c == '&' { + return string(unescape([]byte(s), false)) + } + } + return s +} diff --git a/vendor/golang.org/x/net/html/escape_test.go b/vendor/golang.org/x/net/html/escape_test.go new file mode 100644 index 000000000..b405d4b4a --- /dev/null +++ b/vendor/golang.org/x/net/html/escape_test.go @@ -0,0 +1,97 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import "testing" + +type unescapeTest struct { + // A short description of the test case. + desc string + // The HTML text. + html string + // The unescaped text. + unescaped string +} + +var unescapeTests = []unescapeTest{ + // Handle no entities. + { + "copy", + "A\ttext\nstring", + "A\ttext\nstring", + }, + // Handle simple named entities. + { + "simple", + "& > <", + "& > <", + }, + // Handle hitting the end of the string. + { + "stringEnd", + "& &", + "& &", + }, + // Handle entities with two codepoints. + { + "multiCodepoint", + "text ⋛︀ blah", + "text \u22db\ufe00 blah", + }, + // Handle decimal numeric entities. + { + "decimalEntity", + "Delta = Δ ", + "Delta = Δ ", + }, + // Handle hexadecimal numeric entities. + { + "hexadecimalEntity", + "Lambda = λ = λ ", + "Lambda = λ = λ ", + }, + // Handle numeric early termination. + { + "numericEnds", + "&# &#x €43 © = ©f = ©", + "&# &#x €43 © = ©f = ©", + }, + // Handle numeric ISO-8859-1 entity replacements. + { + "numericReplacements", + "Footnote‡", + "Footnote‡", + }, +} + +func TestUnescape(t *testing.T) { + for _, tt := range unescapeTests { + unescaped := UnescapeString(tt.html) + if unescaped != tt.unescaped { + t.Errorf("TestUnescape %s: want %q, got %q", tt.desc, tt.unescaped, unescaped) + } + } +} + +func TestUnescapeEscape(t *testing.T) { + ss := []string{ + ``, + `abc def`, + `a & b`, + `a&b`, + `a & b`, + `"`, + `"`, + `"<&>"`, + `"<&>"`, + `3&5==1 && 0<1, "0<1", a+acute=á`, + `The special characters are: <, >, &, ' and "`, + } + for _, s := range ss { + if got := UnescapeString(EscapeString(s)); got != s { + t.Errorf("got %q want %q", got, s) + } + } +} diff --git a/vendor/golang.org/x/net/html/example_test.go b/vendor/golang.org/x/net/html/example_test.go new file mode 100644 index 000000000..0b06ed773 --- /dev/null +++ b/vendor/golang.org/x/net/html/example_test.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This example demonstrates parsing HTML data and walking the resulting tree. +package html_test + +import ( + "fmt" + "log" + "strings" + + "golang.org/x/net/html" +) + +func ExampleParse() { + s := `
<p>Links:</p><ul><li><a href="foo">Foo</a><li><a href="/bar/baz">BarBaz</a></ul>
` + doc, err := html.Parse(strings.NewReader(s)) + if err != nil { + log.Fatal(err) + } + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + for _, a := range n.Attr { + if a.Key == "href" { + fmt.Println(a.Val) + break + } + } + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(doc) + // Output: + // foo + // /bar/baz +} diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go new file mode 100644 index 000000000..01477a963 --- /dev/null +++ b/vendor/golang.org/x/net/html/foreign.go @@ -0,0 +1,226 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "strings" +) + +func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { + for i := range aa { + if newName, ok := nameMap[aa[i].Key]; ok { + aa[i].Key = newName + } + } +} + +func adjustForeignAttributes(aa []Attribute) { + for i, a := range aa { + if a.Key == "" || a.Key[0] != 'x' { + continue + } + switch a.Key { + case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", + "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": + j := strings.Index(a.Key, ":") + aa[i].Namespace = a.Key[:j] + aa[i].Key = a.Key[j+1:] + } + } +} + +func htmlIntegrationPoint(n *Node) bool { + if n.Type != ElementNode { + return false + } + switch n.Namespace { + case "math": + if n.Data == "annotation-xml" { + for _, a := range n.Attr { + if a.Key == "encoding" { + val := strings.ToLower(a.Val) + if val == "text/html" || val == "application/xhtml+xml" { + return true + } + } + } + } + case "svg": + switch n.Data { + case "desc", "foreignObject", "title": + return true + } + } + return false +} + +func mathMLTextIntegrationPoint(n *Node) bool { + if n.Namespace != "math" { + return false + } + switch n.Data { + case "mi", "mo", "mn", "ms", "mtext": + return true + } + return false +} + +// Section 12.2.6.5. +var breakout = map[string]bool{ + "b": true, + "big": true, + "blockquote": true, + "body": true, + "br": true, + "center": true, + "code": true, + "dd": true, + "div": true, + "dl": true, + "dt": true, + "em": true, + "embed": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "hr": true, + "i": true, + "img": true, + "li": true, + "listing": true, + "menu": true, + "meta": true, + "nobr": true, + "ol": true, + "p": true, + "pre": true, + "ruby": true, + "s": true, + "small": true, + "span": true, + "strong": true, + "strike": true, + "sub": true, + "sup": true, + "table": true, + "tt": true, + "u": true, + "ul": true, + "var": true, +} + +// Section 12.2.6.5. 
+var svgTagNameAdjustments = map[string]string{ + "altglyph": "altGlyph", + "altglyphdef": "altGlyphDef", + "altglyphitem": "altGlyphItem", + "animatecolor": "animateColor", + "animatemotion": "animateMotion", + "animatetransform": "animateTransform", + "clippath": "clipPath", + "feblend": "feBlend", + "fecolormatrix": "feColorMatrix", + "fecomponenttransfer": "feComponentTransfer", + "fecomposite": "feComposite", + "feconvolvematrix": "feConvolveMatrix", + "fediffuselighting": "feDiffuseLighting", + "fedisplacementmap": "feDisplacementMap", + "fedistantlight": "feDistantLight", + "feflood": "feFlood", + "fefunca": "feFuncA", + "fefuncb": "feFuncB", + "fefuncg": "feFuncG", + "fefuncr": "feFuncR", + "fegaussianblur": "feGaussianBlur", + "feimage": "feImage", + "femerge": "feMerge", + "femergenode": "feMergeNode", + "femorphology": "feMorphology", + "feoffset": "feOffset", + "fepointlight": "fePointLight", + "fespecularlighting": "feSpecularLighting", + "fespotlight": "feSpotLight", + "fetile": "feTile", + "feturbulence": "feTurbulence", + "foreignobject": "foreignObject", + "glyphref": "glyphRef", + "lineargradient": "linearGradient", + "radialgradient": "radialGradient", + "textpath": "textPath", +} + +// Section 12.2.6.1 +var mathMLAttributeAdjustments = map[string]string{ + "definitionurl": "definitionURL", +} + +var svgAttributeAdjustments = map[string]string{ + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "contentscripttype": "contentScriptType", + "contentstyletype": "contentStyleType", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "externalresourcesrequired": "externalResourcesRequired", + "filterres": "filterRes", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan", +} diff --git 
a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go new file mode 100644 index 000000000..6f136c4ef --- /dev/null +++ b/vendor/golang.org/x/net/html/node.go @@ -0,0 +1,194 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "golang.org/x/net/html/atom" +) + +// A NodeType is the type of a Node. +type NodeType uint32 + +const ( + ErrorNode NodeType = iota + TextNode + DocumentNode + ElementNode + CommentNode + DoctypeNode + scopeMarkerNode +) + +// Section 12.2.4.3 says "The markers are inserted when entering applet, +// object, marquee, template, td, th, and caption elements, and are used +// to prevent formatting from "leaking" into applet, object, marquee, +// template, td, th, and caption elements". +var scopeMarker = Node{Type: scopeMarkerNode} + +// A Node consists of a NodeType and some Data (tag name for element nodes, +// content for text) and are part of a tree of Nodes. Element nodes may also +// have a Namespace and contain a slice of Attributes. Data is unescaped, so +// that it looks like "a 0 { + return (*s)[i-1] + } + return nil +} + +// index returns the index of the top-most occurrence of n in the stack, or -1 +// if n is not present. +func (s *nodeStack) index(n *Node) int { + for i := len(*s) - 1; i >= 0; i-- { + if (*s)[i] == n { + return i + } + } + return -1 +} + +// insert inserts a node at the given index. +func (s *nodeStack) insert(i int, n *Node) { + (*s) = append(*s, nil) + copy((*s)[i+1:], (*s)[i:]) + (*s)[i] = n +} + +// remove removes a node from the stack. It is a no-op if n is not present. +func (s *nodeStack) remove(n *Node) { + i := s.index(n) + if i == -1 { + return + } + copy((*s)[i:], (*s)[i+1:]) + j := len(*s) - 1 + (*s)[j] = nil + *s = (*s)[:j] +} diff --git a/vendor/golang.org/x/net/html/node_test.go b/vendor/golang.org/x/net/html/node_test.go new file mode 100644 index 000000000..471102f3a --- /dev/null +++ b/vendor/golang.org/x/net/html/node_test.go @@ -0,0 +1,146 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "fmt" +) + +// checkTreeConsistency checks that a node and its descendants are all +// consistent in their parent/child/sibling relationships. +func checkTreeConsistency(n *Node) error { + return checkTreeConsistency1(n, 0) +} + +func checkTreeConsistency1(n *Node, depth int) error { + if depth == 1e4 { + return fmt.Errorf("html: tree looks like it contains a cycle") + } + if err := checkNodeConsistency(n); err != nil { + return err + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + if err := checkTreeConsistency1(c, depth+1); err != nil { + return err + } + } + return nil +} + +// checkNodeConsistency checks that a node's parent/child/sibling relationships +// are consistent. 
+func checkNodeConsistency(n *Node) error { + if n == nil { + return nil + } + + nParent := 0 + for p := n.Parent; p != nil; p = p.Parent { + nParent++ + if nParent == 1e4 { + return fmt.Errorf("html: parent list looks like an infinite loop") + } + } + + nForward := 0 + for c := n.FirstChild; c != nil; c = c.NextSibling { + nForward++ + if nForward == 1e6 { + return fmt.Errorf("html: forward list of children looks like an infinite loop") + } + if c.Parent != n { + return fmt.Errorf("html: inconsistent child/parent relationship") + } + } + + nBackward := 0 + for c := n.LastChild; c != nil; c = c.PrevSibling { + nBackward++ + if nBackward == 1e6 { + return fmt.Errorf("html: backward list of children looks like an infinite loop") + } + if c.Parent != n { + return fmt.Errorf("html: inconsistent child/parent relationship") + } + } + + if n.Parent != nil { + if n.Parent == n { + return fmt.Errorf("html: inconsistent parent relationship") + } + if n.Parent == n.FirstChild { + return fmt.Errorf("html: inconsistent parent/first relationship") + } + if n.Parent == n.LastChild { + return fmt.Errorf("html: inconsistent parent/last relationship") + } + if n.Parent == n.PrevSibling { + return fmt.Errorf("html: inconsistent parent/prev relationship") + } + if n.Parent == n.NextSibling { + return fmt.Errorf("html: inconsistent parent/next relationship") + } + + parentHasNAsAChild := false + for c := n.Parent.FirstChild; c != nil; c = c.NextSibling { + if c == n { + parentHasNAsAChild = true + break + } + } + if !parentHasNAsAChild { + return fmt.Errorf("html: inconsistent parent/child relationship") + } + } + + if n.PrevSibling != nil && n.PrevSibling.NextSibling != n { + return fmt.Errorf("html: inconsistent prev/next relationship") + } + if n.NextSibling != nil && n.NextSibling.PrevSibling != n { + return fmt.Errorf("html: inconsistent next/prev relationship") + } + + if (n.FirstChild == nil) != (n.LastChild == nil) { + return fmt.Errorf("html: inconsistent first/last relationship") + } + if n.FirstChild != nil && n.FirstChild == n.LastChild { + // We have a sole child. + if n.FirstChild.PrevSibling != nil || n.FirstChild.NextSibling != nil { + return fmt.Errorf("html: inconsistent sole child's sibling relationship") + } + } + + seen := map[*Node]bool{} + + var last *Node + for c := n.FirstChild; c != nil; c = c.NextSibling { + if seen[c] { + return fmt.Errorf("html: inconsistent repeated child") + } + seen[c] = true + last = c + } + if last != n.LastChild { + return fmt.Errorf("html: inconsistent last relationship") + } + + var first *Node + for c := n.LastChild; c != nil; c = c.PrevSibling { + if !seen[c] { + return fmt.Errorf("html: inconsistent missing child") + } + delete(seen, c) + first = c + } + if first != n.FirstChild { + return fmt.Errorf("html: inconsistent first relationship") + } + + if len(seen) != 0 { + return fmt.Errorf("html: inconsistent forwards/backwards child list") + } + + return nil +} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go new file mode 100644 index 000000000..2a5abddc6 --- /dev/null +++ b/vendor/golang.org/x/net/html/parse.go @@ -0,0 +1,2094 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package html + +import ( + "errors" + "fmt" + "io" + "strings" + + a "golang.org/x/net/html/atom" +) + +// A parser implements the HTML5 parsing algorithm: +// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction +type parser struct { + // tokenizer provides the tokens for the parser. + tokenizer *Tokenizer + // tok is the most recently read token. + tok Token + // Self-closing tags like
are treated as start tags, except that + // hasSelfClosingToken is set while they are being processed. + hasSelfClosingToken bool + // doc is the document root element. + doc *Node + // The stack of open elements (section 12.2.4.2) and active formatting + // elements (section 12.2.4.3). + oe, afe nodeStack + // Element pointers (section 12.2.4.4). + head, form *Node + // Other parsing state flags (section 12.2.4.5). + scripting, framesetOK bool + // im is the current insertion mode. + im insertionMode + // originalIM is the insertion mode to go back to after completing a text + // or inTableText insertion mode. + originalIM insertionMode + // fosterParenting is whether new elements should be inserted according to + // the foster parenting rules (section 12.2.6.1). + fosterParenting bool + // quirks is whether the parser is operating in "quirks mode." + quirks bool + // fragment is whether the parser is parsing an HTML fragment. + fragment bool + // context is the context element when parsing an HTML fragment + // (section 12.4). + context *Node +} + +func (p *parser) top() *Node { + if n := p.oe.top(); n != nil { + return n + } + return p.doc +} + +// Stop tags for use in popUntil. These come from section 12.2.4.2. +var ( + defaultScopeStopTags = map[string][]a.Atom{ + "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, + "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, + "svg": {a.Desc, a.ForeignObject, a.Title}, + } +) + +type scope int + +const ( + defaultScope scope = iota + listItemScope + buttonScope + tableScope + tableRowScope + tableBodyScope + selectScope +) + +// popUntil pops the stack of open elements at the highest element whose tag +// is in matchTags, provided there is no higher element in the scope's stop +// tags (as defined in section 12.2.4.2). It returns whether or not there was +// such an element. If there was not, popUntil leaves the stack unchanged. +// +// For example, the set of stop tags for table scope is: "html", "table". If +// the stack was: +// ["html", "body", "font", "table", "b", "i", "u"] +// then popUntil(tableScope, "font") would return false, but +// popUntil(tableScope, "i") would return true and the stack would become: +// ["html", "body", "font", "table", "b"] +// +// If an element's tag is in both the stop tags and matchTags, then the stack +// will be popped and the function returns true (provided, of course, there was +// no higher element in the stack that was also in the stop tags). For example, +// popUntil(tableScope, "table") returns true and leaves: +// ["html", "body", "font"] +func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { + if i := p.indexOfElementInScope(s, matchTags...); i != -1 { + p.oe = p.oe[:i] + return true + } + return false +} + +// indexOfElementInScope returns the index in p.oe of the highest element whose +// tag is in matchTags that is in scope. If no matching element is in scope, it +// returns -1. +func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + if p.oe[i].Namespace == "" { + for _, t := range matchTags { + if t == tagAtom { + return i + } + } + switch s { + case defaultScope: + // No-op. 
+ case listItemScope: + if tagAtom == a.Ol || tagAtom == a.Ul { + return -1 + } + case buttonScope: + if tagAtom == a.Button { + return -1 + } + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table { + return -1 + } + case selectScope: + if tagAtom != a.Optgroup && tagAtom != a.Option { + return -1 + } + default: + panic("unreachable") + } + } + switch s { + case defaultScope, listItemScope, buttonScope: + for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { + if t == tagAtom { + return -1 + } + } + } + } + return -1 +} + +// elementInScope is like popUntil, except that it doesn't modify the stack of +// open elements. +func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { + return p.indexOfElementInScope(s, matchTags...) != -1 +} + +// clearStackToContext pops elements off the stack of open elements until a +// scope-defined element is found. +func (p *parser) clearStackToContext(s scope) { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + switch s { + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table { + p.oe = p.oe[:i+1] + return + } + case tableRowScope: + if tagAtom == a.Html || tagAtom == a.Tr { + p.oe = p.oe[:i+1] + return + } + case tableBodyScope: + if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead { + p.oe = p.oe[:i+1] + return + } + default: + panic("unreachable") + } + } +} + +// generateImpliedEndTags pops nodes off the stack of open elements as long as +// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt. +// If exceptions are specified, nodes with that name will not be popped off. +func (p *parser) generateImpliedEndTags(exceptions ...string) { + var i int +loop: + for i = len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if n.Type == ElementNode { + switch n.DataAtom { + case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt: + for _, except := range exceptions { + if n.Data == except { + break loop + } + } + continue + } + } + break + } + + p.oe = p.oe[:i+1] +} + +// addChild adds a child node n to the top element, and pushes n onto the stack +// of open elements if it is an element node. +func (p *parser) addChild(n *Node) { + if p.shouldFosterParent() { + p.fosterParent(n) + } else { + p.top().AppendChild(n) + } + + if n.Type == ElementNode { + p.oe = append(p.oe, n) + } +} + +// shouldFosterParent returns whether the next node to be added should be +// foster parented. +func (p *parser) shouldFosterParent() bool { + if p.fosterParenting { + switch p.top().DataAtom { + case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: + return true + } + } + return false +} + +// fosterParent adds a child node according to the foster parenting rules. +// Section 12.2.6.1, "foster parenting". +func (p *parser) fosterParent(n *Node) { + var table, parent, prev *Node + var i int + for i = len(p.oe) - 1; i >= 0; i-- { + if p.oe[i].DataAtom == a.Table { + table = p.oe[i] + break + } + } + + if table == nil { + // The foster parent is the html element. + parent = p.oe[0] + } else { + parent = table.Parent + } + if parent == nil { + parent = p.oe[i-1] + } + + if table != nil { + prev = table.PrevSibling + } else { + prev = parent.LastChild + } + if prev != nil && prev.Type == TextNode && n.Type == TextNode { + prev.Data += n.Data + return + } + + parent.InsertBefore(n, table) +} + +// addText adds text to the preceding node if it is a text node, or else it +// calls addChild with a new text node. 
+func (p *parser) addText(text string) { + if text == "" { + return + } + + if p.shouldFosterParent() { + p.fosterParent(&Node{ + Type: TextNode, + Data: text, + }) + return + } + + t := p.top() + if n := t.LastChild; n != nil && n.Type == TextNode { + n.Data += text + return + } + p.addChild(&Node{ + Type: TextNode, + Data: text, + }) +} + +// addElement adds a child element based on the current token. +func (p *parser) addElement() { + p.addChild(&Node{ + Type: ElementNode, + DataAtom: p.tok.DataAtom, + Data: p.tok.Data, + Attr: p.tok.Attr, + }) +} + +// Section 12.2.4.3. +func (p *parser) addFormattingElement() { + tagAtom, attr := p.tok.DataAtom, p.tok.Attr + p.addElement() + + // Implement the Noah's Ark clause, but with three per family instead of two. + identicalElements := 0 +findIdenticalElements: + for i := len(p.afe) - 1; i >= 0; i-- { + n := p.afe[i] + if n.Type == scopeMarkerNode { + break + } + if n.Type != ElementNode { + continue + } + if n.Namespace != "" { + continue + } + if n.DataAtom != tagAtom { + continue + } + if len(n.Attr) != len(attr) { + continue + } + compareAttributes: + for _, t0 := range n.Attr { + for _, t1 := range attr { + if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { + // Found a match for this attribute, continue with the next attribute. + continue compareAttributes + } + } + // If we get here, there is no attribute that matches a. + // Therefore the element is not identical to the new one. + continue findIdenticalElements + } + + identicalElements++ + if identicalElements >= 3 { + p.afe.remove(n) + } + } + + p.afe = append(p.afe, p.top()) +} + +// Section 12.2.4.3. +func (p *parser) clearActiveFormattingElements() { + for { + n := p.afe.pop() + if len(p.afe) == 0 || n.Type == scopeMarkerNode { + return + } + } +} + +// Section 12.2.4.3. +func (p *parser) reconstructActiveFormattingElements() { + n := p.afe.top() + if n == nil { + return + } + if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { + return + } + i := len(p.afe) - 1 + for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { + if i == 0 { + i = -1 + break + } + i-- + n = p.afe[i] + } + for { + i++ + clone := p.afe[i].clone() + p.addChild(clone) + p.afe[i] = clone + if i == len(p.afe)-1 { + break + } + } +} + +// Section 12.2.5. +func (p *parser) acknowledgeSelfClosingTag() { + p.hasSelfClosingToken = false +} + +// An insertion mode (section 12.2.4.1) is the state transition function from +// a particular state in the HTML5 parser's state machine. It updates the +// parser's fields depending on parser.tok (where ErrorToken means EOF). +// It returns whether the token was consumed. +type insertionMode func(*parser) bool + +// setOriginalIM sets the insertion mode to return to after completing a text or +// inTableText insertion mode. +// Section 12.2.4.1, "using the rules for". +func (p *parser) setOriginalIM() { + if p.originalIM != nil { + panic("html: bad parser state: originalIM was set twice") + } + p.originalIM = p.im +} + +// Section 12.2.4.1, "reset the insertion mode". 
+func (p *parser) resetInsertionMode() { + for i := len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if i == 0 && p.context != nil { + n = p.context + } + + switch n.DataAtom { + case a.Select: + p.im = inSelectIM + case a.Td, a.Th: + p.im = inCellIM + case a.Tr: + p.im = inRowIM + case a.Tbody, a.Thead, a.Tfoot: + p.im = inTableBodyIM + case a.Caption: + p.im = inCaptionIM + case a.Colgroup: + p.im = inColumnGroupIM + case a.Table: + p.im = inTableIM + case a.Head: + p.im = inBodyIM + case a.Body: + p.im = inBodyIM + case a.Frameset: + p.im = inFramesetIM + case a.Html: + p.im = beforeHeadIM + default: + continue + } + return + } + p.im = inBodyIM +} + +const whitespace = " \t\r\n\f" + +// Section 12.2.6.4.1. +func initialIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + n, quirks := parseDoctype(p.tok.Data) + p.doc.AppendChild(n) + p.quirks = quirks + p.im = beforeHTMLIM + return true + } + p.quirks = true + p.im = beforeHTMLIM + return false +} + +// Section 12.2.6.4.2. +func beforeHTMLIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + if p.tok.DataAtom == a.Html { + p.addElement() + p.im = beforeHeadIM + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + } + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false +} + +// Section 12.2.6.4.3. +func beforeHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Head: + p.addElement() + p.head = p.top() + p.im = inHeadIM + return true + case a.Html: + return inBodyIM(p) + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.6.4.4. +func inHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. 
+ p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: + p.addElement() + p.oe.pop() + p.acknowledgeSelfClosingTag() + return true + case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: + p.addElement() + p.setOriginalIM() + p.im = textIM + return true + case a.Head: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head: + n := p.oe.pop() + if n.DataAtom != a.Head { + panic("html: bad parser state: element not found, in the in-head insertion mode") + } + p.im = afterHeadIM + return true + case a.Body, a.Html, a.Br: + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.6.4.6. +func afterHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. + p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Body: + p.addElement() + p.framesetOK = false + p.im = inBodyIM + return true + case a.Frameset: + p.addElement() + p.im = inFramesetIM + return true + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title: + p.oe = append(p.oe, p.head) + defer p.oe.remove(p.head) + return inHeadIM(p) + case a.Head: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Body, a.Html, a.Br: + // Drop down to creating an implied tag. + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) + p.framesetOK = true + return false +} + +// copyAttributes copies attributes of src not found on dst to dst. +func copyAttributes(dst *Node, src Token) { + if len(src.Attr) == 0 { + return + } + attr := map[string]string{} + for _, t := range dst.Attr { + attr[t.Key] = t.Val + } + for _, t := range src.Attr { + if _, ok := attr[t.Key]; !ok { + dst.Attr = append(dst.Attr, t) + attr[t.Key] = t.Val + } + } +} + +// Section 12.2.6.4.7. +func inBodyIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + d := p.tok.Data + switch n := p.oe.top(); n.DataAtom { + case a.Pre, a.Listing: + if n.FirstChild == nil { + // Ignore a newline at the start of a
<pre> block.
+				if d != "" && d[0] == '\r' {
+					d = d[1:]
+				}
+				if d != "" && d[0] == '\n' {
+					d = d[1:]
+				}
+			}
+		}
+		d = strings.Replace(d, "\x00", "", -1)
+		if d == "" {
+			return true
+		}
+		p.reconstructActiveFormattingElements()
+		p.addText(d)
+		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
+			// There were non-whitespace characters inserted.
+			p.framesetOK = false
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			copyAttributes(p.oe[0], p.tok)
+		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
+			return inHeadIM(p)
+		case a.Body:
+			if len(p.oe) >= 2 {
+				body := p.oe[1]
+				if body.Type == ElementNode && body.DataAtom == a.Body {
+					p.framesetOK = false
+					copyAttributes(body, p.tok)
+				}
+			}
+		case a.Frameset:
+			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
+				// Ignore the token.
+				return true
+			}
+			body := p.oe[1]
+			if body.Parent != nil {
+				body.Parent.RemoveChild(body)
+			}
+			p.oe = p.oe[:1]
+			p.addElement()
+			p.im = inFramesetIM
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(buttonScope, a.P)
+			switch n := p.top(); n.DataAtom {
+			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+				p.oe.pop()
+			}
+			p.addElement()
+		case a.Pre, a.Listing:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			// The newline, if any, will be dealt with by the TextToken case.
+			p.framesetOK = false
+		case a.Form:
+			if p.form == nil {
+				p.popUntil(buttonScope, a.P)
+				p.addElement()
+				p.form = p.top()
+			}
+		case a.Li:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Li:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Dd, a.Dt:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Dd, a.Dt:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Plaintext:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Button:
+			p.popUntil(defaultScope, a.Button)
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+		case a.A:
+			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
+				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
+					p.inBodyEndTagFormatting(a.A)
+					p.oe.remove(n)
+					p.afe.remove(n)
+					break
+				}
+			}
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.Nobr:
+			p.reconstructActiveFormattingElements()
+			if p.elementInScope(defaultScope, a.Nobr) {
+				p.inBodyEndTagFormatting(a.Nobr)
+				p.reconstructActiveFormattingElements()
+			}
+			p.addFormattingElement()
+		case a.Applet, a.Marquee, a.Object:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.afe = append(p.afe, &scopeMarker)
+			p.framesetOK = false
+		case a.Table:
+			if !p.quirks {
+				p.popUntil(buttonScope, a.P)
+			}
+			p.addElement()
+			p.framesetOK = false
+			p.im = inTableIM
+			return true
+		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			if p.tok.DataAtom == a.Input {
+				for _, t := range p.tok.Attr {
+					if t.Key == "type" {
+						if strings.ToLower(t.Val) == "hidden" {
+							// Skip setting framesetOK = false
+							return true
+						}
+					}
+				}
+			}
+			p.framesetOK = false
+		case a.Param, a.Source, a.Track:
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+		case a.Hr:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			p.framesetOK = false
+		case a.Image:
+			p.tok.DataAtom = a.Img
+			p.tok.Data = a.Img.String()
+			return false
+		case a.Isindex:
+			if p.form != nil {
+				// Ignore the token.
+				return true
+			}
+			action := ""
+			prompt := "This is a searchable index. Enter search keywords: "
+			attr := []Attribute{{Key: "name", Val: "isindex"}}
+			for _, t := range p.tok.Attr {
+				switch t.Key {
+				case "action":
+					action = t.Val
+				case "name":
+					// Ignore the attribute.
+				case "prompt":
+					prompt = t.Val
+				default:
+					attr = append(attr, t)
+				}
+			}
+			p.acknowledgeSelfClosingTag()
+			p.popUntil(buttonScope, a.P)
+			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
+			if action != "" {
+				p.form.Attr = []Attribute{{Key: "action", Val: action}}
+			}
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
+			p.addText(prompt)
+			p.addChild(&Node{
+				Type:     ElementNode,
+				DataAtom: a.Input,
+				Data:     a.Input.String(),
+				Attr:     attr,
+			})
+			p.oe.pop()
+			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
+		case a.Textarea:
+			p.addElement()
+			p.setOriginalIM()
+			p.framesetOK = false
+			p.im = textIM
+		case a.Xmp:
+			p.popUntil(buttonScope, a.P)
+			p.reconstructActiveFormattingElements()
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Iframe:
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Noembed, a.Noscript:
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Select:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+			p.im = inSelectIM
+			return true
+		case a.Optgroup, a.Option:
+			if p.top().DataAtom == a.Option {
+				p.oe.pop()
+			}
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		case a.Rp, a.Rt:
+			if p.elementInScope(defaultScope, a.Ruby) {
+				p.generateImpliedEndTags()
+			}
+			p.addElement()
+		case a.Math, a.Svg:
+			p.reconstructActiveFormattingElements()
+			if p.tok.DataAtom == a.Math {
+				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+			} else {
+				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+			}
+			adjustForeignAttributes(p.tok.Attr)
+			p.addElement()
+			p.top().Namespace = p.tok.Data
+			if p.hasSelfClosingToken {
+				p.oe.pop()
+				p.acknowledgeSelfClosingTag()
+			}
+			return true
+		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+			// Ignore the token.
+		default:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Body:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.im = afterBodyIM
+			}
+		case a.Html:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
+				return false
+			}
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.Form:
+			node := p.form
+			p.form = nil
+			i := p.indexOfElementInScope(defaultScope, a.Form)
+			if node == nil || i == -1 || p.oe[i] != node {
+				// Ignore the token.
+				return true
+			}
+			p.generateImpliedEndTags()
+			p.oe.remove(node)
+		case a.P:
+			if !p.elementInScope(buttonScope, a.P) {
+				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
+			}
+			p.popUntil(buttonScope, a.P)
+		case a.Li:
+			p.popUntil(listItemScope, a.Li)
+		case a.Dd, a.Dt:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
+		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.inBodyEndTagFormatting(p.tok.DataAtom)
+		case a.Applet, a.Marquee, a.Object:
+			if p.popUntil(defaultScope, p.tok.DataAtom) {
+				p.clearActiveFormattingElements()
+			}
+		case a.Br:
+			p.tok.Type = StartTagToken
+			return false
+		default:
+			p.inBodyEndTagOther(p.tok.DataAtom)
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	}
+
+	return true
+}
+
+func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
+	// This is the "adoption agency" algorithm, described at
+	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
+
+	// TODO: this is a fairly literal line-by-line translation of that algorithm.
+	// Once the code successfully parses the comprehensive test suite, we should
+	// refactor this code to be more idiomatic.
+
+	// Steps 1-4. The outer loop.
+	for i := 0; i < 8; i++ {
+		// Step 5. Find the formatting element.
+		var formattingElement *Node
+		for j := len(p.afe) - 1; j >= 0; j-- {
+			if p.afe[j].Type == scopeMarkerNode {
+				break
+			}
+			if p.afe[j].DataAtom == tagAtom {
+				formattingElement = p.afe[j]
+				break
+			}
+		}
+		if formattingElement == nil {
+			p.inBodyEndTagOther(tagAtom)
+			return
+		}
+		feIndex := p.oe.index(formattingElement)
+		if feIndex == -1 {
+			p.afe.remove(formattingElement)
+			return
+		}
+		if !p.elementInScope(defaultScope, tagAtom) {
+			// Ignore the tag.
+			return
+		}
+
+		// Steps 9-10. Find the furthest block.
+		var furthestBlock *Node
+		for _, e := range p.oe[feIndex:] {
+			if isSpecialElement(e) {
+				furthestBlock = e
+				break
+			}
+		}
+		if furthestBlock == nil {
+			e := p.oe.pop()
+			for e != formattingElement {
+				e = p.oe.pop()
+			}
+			p.afe.remove(e)
+			return
+		}
+
+		// Steps 11-12. Find the common ancestor and bookmark node.
+		commonAncestor := p.oe[feIndex-1]
+		bookmark := p.afe.index(formattingElement)
+
+		// Step 13. The inner loop. Find the lastNode to reparent.
+		lastNode := furthestBlock
+		node := furthestBlock
+		x := p.oe.index(node)
+		// Steps 13.1-13.2
+		for j := 0; j < 3; j++ {
+			// Step 13.3.
+			x--
+			node = p.oe[x]
+			// Step 13.4 - 13.5.
+			if p.afe.index(node) == -1 {
+				p.oe.remove(node)
+				continue
+			}
+			// Step 13.6.
+			if node == formattingElement {
+				break
+			}
+			// Step 13.7.
+			clone := node.clone()
+			p.afe[p.afe.index(node)] = clone
+			p.oe[p.oe.index(node)] = clone
+			node = clone
+			// Step 13.8.
+			if lastNode == furthestBlock {
+				bookmark = p.afe.index(node) + 1
+			}
+			// Step 13.9.
+			if lastNode.Parent != nil {
+				lastNode.Parent.RemoveChild(lastNode)
+			}
+			node.AppendChild(lastNode)
+			// Step 13.10.
+			lastNode = node
+		}
+
+		// Step 14. Reparent lastNode to the common ancestor,
+		// or for misnested table nodes, to the foster parent.
+		if lastNode.Parent != nil {
+			lastNode.Parent.RemoveChild(lastNode)
+		}
+		switch commonAncestor.DataAtom {
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			p.fosterParent(lastNode)
+		default:
+			commonAncestor.AppendChild(lastNode)
+		}
+
+		// Steps 15-17. Reparent nodes from the furthest block's children
+		// to a clone of the formatting element.
+		clone := formattingElement.clone()
+		reparentChildren(clone, furthestBlock)
+		furthestBlock.AppendChild(clone)
+
+		// Step 18. Fix up the list of active formatting elements.
+		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
+			// Move the bookmark with the rest of the list.
+			bookmark--
+		}
+		p.afe.remove(formattingElement)
+		p.afe.insert(bookmark, clone)
+
+		// Step 19. Fix up the stack of open elements.
+		p.oe.remove(formattingElement)
+		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
+	}
+}
+
+// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
+// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
+// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
+func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		if p.oe[i].DataAtom == tagAtom {
+			p.oe = p.oe[:i]
+			break
+		}
+		if isSpecialElement(p.oe[i]) {
+			break
+		}
+	}
+}
+
+// Section 12.2.6.4.8.
+func textIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		p.oe.pop()
+	case TextToken:
+		d := p.tok.Data
+		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
+			// Ignore a newline at the start of a <textarea> block.
+#errors
+#document
+| 
+|   
+|   
+|     -->
+#errors
+#document
+| 
+|   
+|   
+|     
+#errors
+Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
+#document
+| 
+|   
+|   
+|     
+#errors
+Line: 1 Col: 9 Unexpected end tag (strong). Expected DOCTYPE.
+Line: 1 Col: 9 Unexpected end tag (strong) after the (implied) root element.
+Line: 1 Col: 13 Unexpected end tag (b) after the (implied) root element.
+Line: 1 Col: 18 Unexpected end tag (em) after the (implied) root element.
+Line: 1 Col: 22 Unexpected end tag (i) after the (implied) root element.
+Line: 1 Col: 26 Unexpected end tag (u) after the (implied) root element.
+Line: 1 Col: 35 Unexpected end tag (strike) after the (implied) root element.
+Line: 1 Col: 39 Unexpected end tag (s) after the (implied) root element.
+Line: 1 Col: 47 Unexpected end tag (blink) after the (implied) root element.
+Line: 1 Col: 52 Unexpected end tag (tt) after the (implied) root element.
+Line: 1 Col: 58 Unexpected end tag (pre) after the (implied) root element.
+Line: 1 Col: 64 Unexpected end tag (big) after the (implied) root element.
+Line: 1 Col: 72 Unexpected end tag (small) after the (implied) root element.
+Line: 1 Col: 79 Unexpected end tag (font) after the (implied) root element.
+Line: 1 Col: 88 Unexpected end tag (select) after the (implied) root element.
+Line: 1 Col: 93 Unexpected end tag (h1) after the (implied) root element.
+Line: 1 Col: 98 Unexpected end tag (h2) after the (implied) root element.
+Line: 1 Col: 103 Unexpected end tag (h3) after the (implied) root element.
+Line: 1 Col: 108 Unexpected end tag (h4) after the (implied) root element.
+Line: 1 Col: 113 Unexpected end tag (h5) after the (implied) root element.
+Line: 1 Col: 118 Unexpected end tag (h6) after the (implied) root element.
+Line: 1 Col: 125 Unexpected end tag (body) after the (implied) root element.
+Line: 1 Col: 130 Unexpected end tag (br). Treated as br element.
+Line: 1 Col: 134 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 140 This element (img) has no end tag.
+Line: 1 Col: 148 Unexpected end tag (title). Ignored.
+Line: 1 Col: 155 Unexpected end tag (span). Ignored.
+Line: 1 Col: 163 Unexpected end tag (style). Ignored.
+Line: 1 Col: 172 Unexpected end tag (script). Ignored.
+Line: 1 Col: 180 Unexpected end tag (table). Ignored.
+Line: 1 Col: 185 Unexpected end tag (th). Ignored.
+Line: 1 Col: 190 Unexpected end tag (td). Ignored.
+Line: 1 Col: 195 Unexpected end tag (tr). Ignored.
+Line: 1 Col: 203 This element (frame) has no end tag.
+Line: 1 Col: 210 This element (area) has no end tag.
+Line: 1 Col: 217 Unexpected end tag (link). Ignored.
+Line: 1 Col: 225 This element (param) has no end tag.
+Line: 1 Col: 230 This element (hr) has no end tag.
+Line: 1 Col: 238 This element (input) has no end tag.
+Line: 1 Col: 244 Unexpected end tag (col). Ignored.
+Line: 1 Col: 251 Unexpected end tag (base). Ignored.
+Line: 1 Col: 258 Unexpected end tag (meta). Ignored.
+Line: 1 Col: 269 This element (basefont) has no end tag.
+Line: 1 Col: 279 This element (bgsound) has no end tag.
+Line: 1 Col: 287 This element (embed) has no end tag.
+Line: 1 Col: 296 This element (spacer) has no end tag.
+Line: 1 Col: 300 Unexpected end tag (p). Ignored.
+Line: 1 Col: 305 End tag (dd) seen too early. Expected other end tag.
+Line: 1 Col: 310 End tag (dt) seen too early. Expected other end tag.
+Line: 1 Col: 320 Unexpected end tag (caption). Ignored.
+Line: 1 Col: 331 Unexpected end tag (colgroup). Ignored.
+Line: 1 Col: 339 Unexpected end tag (tbody). Ignored.
+Line: 1 Col: 347 Unexpected end tag (tfoot). Ignored.
+Line: 1 Col: 355 Unexpected end tag (thead). Ignored.
+Line: 1 Col: 365 End tag (address) seen too early. Expected other end tag.
+Line: 1 Col: 378 End tag (blockquote) seen too early. Expected other end tag.
+Line: 1 Col: 387 End tag (center) seen too early. Expected other end tag.
+Line: 1 Col: 393 Unexpected end tag (dir). Ignored.
+Line: 1 Col: 399 End tag (div) seen too early. Expected other end tag.
+Line: 1 Col: 404 End tag (dl) seen too early. Expected other end tag.
+Line: 1 Col: 415 End tag (fieldset) seen too early. Expected other end tag.
+Line: 1 Col: 425 End tag (listing) seen too early. Expected other end tag.
+Line: 1 Col: 432 End tag (menu) seen too early. Expected other end tag.
+Line: 1 Col: 437 End tag (ol) seen too early. Expected other end tag.
+Line: 1 Col: 442 End tag (ul) seen too early. Expected other end tag.
+Line: 1 Col: 447 End tag (li) seen too early. Expected other end tag.
+Line: 1 Col: 454 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm.
+Line: 1 Col: 460 This element (wbr) has no end tag.
+Line: 1 Col: 476 End tag (button) seen too early. Expected other end tag.
+Line: 1 Col: 486 End tag (marquee) seen too early. Expected other end tag.
+Line: 1 Col: 495 End tag (object) seen too early. Expected other end tag.
+Line: 1 Col: 513 Unexpected end tag (html). Ignored.
+Line: 1 Col: 513 Unexpected end tag (frameset). Ignored.
+Line: 1 Col: 520 Unexpected end tag (head). Ignored.
+Line: 1 Col: 529 Unexpected end tag (iframe). Ignored.
+Line: 1 Col: 537 This element (image) has no end tag.
+Line: 1 Col: 547 This element (isindex) has no end tag.
+Line: 1 Col: 557 Unexpected end tag (noembed). Ignored.
+Line: 1 Col: 568 Unexpected end tag (noframes). Ignored.
+Line: 1 Col: 579 Unexpected end tag (noscript). Ignored.
+Line: 1 Col: 590 Unexpected end tag (optgroup). Ignored.
+Line: 1 Col: 599 Unexpected end tag (option). Ignored.
+Line: 1 Col: 611 Unexpected end tag (plaintext). Ignored.
+Line: 1 Col: 622 Unexpected end tag (textarea). Ignored.
+#document
+| 
+|   
+|   
+|     
+|

+
+#data
+

+#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end tag (strong) in table context caused voodoo mode. +Line: 1 Col: 20 End tag (strong) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 24 Unexpected end tag (b) in table context caused voodoo mode. +Line: 1 Col: 24 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 29 Unexpected end tag (em) in table context caused voodoo mode. +Line: 1 Col: 29 End tag (em) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 33 Unexpected end tag (i) in table context caused voodoo mode. +Line: 1 Col: 33 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 37 Unexpected end tag (u) in table context caused voodoo mode. +Line: 1 Col: 37 End tag (u) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 46 Unexpected end tag (strike) in table context caused voodoo mode. +Line: 1 Col: 46 End tag (strike) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 50 Unexpected end tag (s) in table context caused voodoo mode. +Line: 1 Col: 50 End tag (s) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 58 Unexpected end tag (blink) in table context caused voodoo mode. +Line: 1 Col: 58 Unexpected end tag (blink). Ignored. +Line: 1 Col: 63 Unexpected end tag (tt) in table context caused voodoo mode. +Line: 1 Col: 63 End tag (tt) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 69 Unexpected end tag (pre) in table context caused voodoo mode. +Line: 1 Col: 69 End tag (pre) seen too early. Expected other end tag. +Line: 1 Col: 75 Unexpected end tag (big) in table context caused voodoo mode. +Line: 1 Col: 75 End tag (big) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 83 Unexpected end tag (small) in table context caused voodoo mode. +Line: 1 Col: 83 End tag (small) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 90 Unexpected end tag (font) in table context caused voodoo mode. +Line: 1 Col: 90 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 99 Unexpected end tag (select) in table context caused voodoo mode. +Line: 1 Col: 99 Unexpected end tag (select). Ignored. +Line: 1 Col: 104 Unexpected end tag (h1) in table context caused voodoo mode. +Line: 1 Col: 104 End tag (h1) seen too early. Expected other end tag. +Line: 1 Col: 109 Unexpected end tag (h2) in table context caused voodoo mode. +Line: 1 Col: 109 End tag (h2) seen too early. Expected other end tag. +Line: 1 Col: 114 Unexpected end tag (h3) in table context caused voodoo mode. +Line: 1 Col: 114 End tag (h3) seen too early. Expected other end tag. +Line: 1 Col: 119 Unexpected end tag (h4) in table context caused voodoo mode. +Line: 1 Col: 119 End tag (h4) seen too early. Expected other end tag. +Line: 1 Col: 124 Unexpected end tag (h5) in table context caused voodoo mode. +Line: 1 Col: 124 End tag (h5) seen too early. Expected other end tag. +Line: 1 Col: 129 Unexpected end tag (h6) in table context caused voodoo mode. +Line: 1 Col: 129 End tag (h6) seen too early. Expected other end tag. +Line: 1 Col: 136 Unexpected end tag (body) in the table row phase. Ignored. +Line: 1 Col: 141 Unexpected end tag (br) in table context caused voodoo mode. +Line: 1 Col: 141 Unexpected end tag (br). Treated as br element. 
+Line: 1 Col: 145 Unexpected end tag (a) in table context caused voodoo mode. +Line: 1 Col: 145 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 151 Unexpected end tag (img) in table context caused voodoo mode. +Line: 1 Col: 151 This element (img) has no end tag. +Line: 1 Col: 159 Unexpected end tag (title) in table context caused voodoo mode. +Line: 1 Col: 159 Unexpected end tag (title). Ignored. +Line: 1 Col: 166 Unexpected end tag (span) in table context caused voodoo mode. +Line: 1 Col: 166 Unexpected end tag (span). Ignored. +Line: 1 Col: 174 Unexpected end tag (style) in table context caused voodoo mode. +Line: 1 Col: 174 Unexpected end tag (style). Ignored. +Line: 1 Col: 183 Unexpected end tag (script) in table context caused voodoo mode. +Line: 1 Col: 183 Unexpected end tag (script). Ignored. +Line: 1 Col: 196 Unexpected end tag (th). Ignored. +Line: 1 Col: 201 Unexpected end tag (td). Ignored. +Line: 1 Col: 206 Unexpected end tag (tr). Ignored. +Line: 1 Col: 214 This element (frame) has no end tag. +Line: 1 Col: 221 This element (area) has no end tag. +Line: 1 Col: 228 Unexpected end tag (link). Ignored. +Line: 1 Col: 236 This element (param) has no end tag. +Line: 1 Col: 241 This element (hr) has no end tag. +Line: 1 Col: 249 This element (input) has no end tag. +Line: 1 Col: 255 Unexpected end tag (col). Ignored. +Line: 1 Col: 262 Unexpected end tag (base). Ignored. +Line: 1 Col: 269 Unexpected end tag (meta). Ignored. +Line: 1 Col: 280 This element (basefont) has no end tag. +Line: 1 Col: 290 This element (bgsound) has no end tag. +Line: 1 Col: 298 This element (embed) has no end tag. +Line: 1 Col: 307 This element (spacer) has no end tag. +Line: 1 Col: 311 Unexpected end tag (p). Ignored. +Line: 1 Col: 316 End tag (dd) seen too early. Expected other end tag. +Line: 1 Col: 321 End tag (dt) seen too early. Expected other end tag. +Line: 1 Col: 331 Unexpected end tag (caption). Ignored. +Line: 1 Col: 342 Unexpected end tag (colgroup). Ignored. +Line: 1 Col: 350 Unexpected end tag (tbody). Ignored. +Line: 1 Col: 358 Unexpected end tag (tfoot). Ignored. +Line: 1 Col: 366 Unexpected end tag (thead). Ignored. +Line: 1 Col: 376 End tag (address) seen too early. Expected other end tag. +Line: 1 Col: 389 End tag (blockquote) seen too early. Expected other end tag. +Line: 1 Col: 398 End tag (center) seen too early. Expected other end tag. +Line: 1 Col: 404 Unexpected end tag (dir). Ignored. +Line: 1 Col: 410 End tag (div) seen too early. Expected other end tag. +Line: 1 Col: 415 End tag (dl) seen too early. Expected other end tag. +Line: 1 Col: 426 End tag (fieldset) seen too early. Expected other end tag. +Line: 1 Col: 436 End tag (listing) seen too early. Expected other end tag. +Line: 1 Col: 443 End tag (menu) seen too early. Expected other end tag. +Line: 1 Col: 448 End tag (ol) seen too early. Expected other end tag. +Line: 1 Col: 453 End tag (ul) seen too early. Expected other end tag. +Line: 1 Col: 458 End tag (li) seen too early. Expected other end tag. +Line: 1 Col: 465 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 471 This element (wbr) has no end tag. +Line: 1 Col: 487 End tag (button) seen too early. Expected other end tag. +Line: 1 Col: 497 End tag (marquee) seen too early. Expected other end tag. +Line: 1 Col: 506 End tag (object) seen too early. Expected other end tag. +Line: 1 Col: 524 Unexpected end tag (html). Ignored. +Line: 1 Col: 524 Unexpected end tag (frameset). Ignored. 
+Line: 1 Col: 531 Unexpected end tag (head). Ignored. +Line: 1 Col: 540 Unexpected end tag (iframe). Ignored. +Line: 1 Col: 548 This element (image) has no end tag. +Line: 1 Col: 558 This element (isindex) has no end tag. +Line: 1 Col: 568 Unexpected end tag (noembed). Ignored. +Line: 1 Col: 579 Unexpected end tag (noframes). Ignored. +Line: 1 Col: 590 Unexpected end tag (noscript). Ignored. +Line: 1 Col: 601 Unexpected end tag (optgroup). Ignored. +Line: 1 Col: 610 Unexpected end tag (option). Ignored. +Line: 1 Col: 622 Unexpected end tag (plaintext). Ignored. +Line: 1 Col: 633 Unexpected end tag (textarea). Ignored. +#document +| +| +| +|
+| +| +| +|

+ +#data + +#errors +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. +Line: 1 Col: 10 Expected closing tag. Unexpected end of file. +#document +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat new file mode 100644 index 000000000..4f8df86f2 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat @@ -0,0 +1,799 @@ +#data + +#errors +#document +| +| +| +| +| + +#data +a +#errors +29: Bogus comment +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| + +#data + +#errors +35: Stray “svg” start tag. +42: Stray end tag “svg” +#document +| +| +| +| +| +#errors +43: Stray “svg” start tag. +50: Stray end tag “svg” +#document +| +| +| +| +|

+#errors +34: Start tag “svg” seen in “table”. +41: Stray end tag “svg”. +#document +| +| +| +| +| +| + +#data +
foo
+#errors +34: Start tag “svg” seen in “table”. +46: Stray end tag “g”. +53: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| + +#data +
foobar
+#errors +34: Start tag “svg” seen in “table”. +46: Stray end tag “g”. +58: Stray end tag “g”. +65: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| + +#data +
foobar
+#errors +41: Start tag “svg” seen in “table”. +53: Stray end tag “g”. +65: Stray end tag “g”. +72: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| +| + +#data +
foobar
+#errors +45: Start tag “svg” seen in “table”. +57: Stray end tag “g”. +69: Stray end tag “g”. +76: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| +| +| + +#data +
foobar
+#errors +#document +| +| +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" + +#data +
foobar

baz

+#errors +#document +| +| +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +
foobar

baz

+#errors +#document +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +
foobar

baz

quux +#errors +70: HTML start tag “p” in a foreign namespace context. +81: “table” closed but “caption” was still open. +#document +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +|

+| "baz" +|

+| "quux" + +#data +
foobarbaz

quux +#errors +78: “table” closed but “caption” was still open. +78: Unclosed elements on stack. +#document +| +| +| +| +| +|
+| +| +| "foo" +| +| "bar" +| "baz" +|

+| "quux" + +#data +foobar

baz

quux +#errors +44: Start tag “svg” seen in “table”. +56: Stray end tag “g”. +68: Stray end tag “g”. +71: HTML start tag “p” in a foreign namespace context. +71: Start tag “p” seen in “table”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

+| "baz" +| +| +|

+| "quux" + +#data +

quux +#errors +50: Stray “svg” start tag. +54: Stray “g” start tag. +62: Stray end tag “g” +66: Stray “g” start tag. +74: Stray end tag “g” +77: Stray “p” start tag. +88: “table” end tag with “select” open. +#document +| +| +| +| +| +| +| +|
+|

quux +#errors +36: Start tag “select” seen in “table”. +42: Stray “svg” start tag. +46: Stray “g” start tag. +54: Stray end tag “g” +58: Stray “g” start tag. +66: Stray end tag “g” +69: Stray “p” start tag. +80: “table” end tag with “select” open. +#document +| +| +| +| +| +|

+| "quux" + +#data +foobar

baz +#errors +41: Stray “svg” start tag. +68: HTML start tag “p” in a foreign namespace context. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +foobar

baz +#errors +34: Stray “svg” start tag. +61: HTML start tag “p” in a foreign namespace context. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

+| "baz" + +#data +

+#errors +31: Stray “svg” start tag. +35: Stray “g” start tag. +40: Stray end tag “g” +44: Stray “g” start tag. +49: Stray end tag “g” +52: Stray “p” start tag. +58: Stray “span” start tag. +58: End of file seen and there were open elements. +#document +| +| +| +| + +#data +

+#errors +42: Stray “svg” start tag. +46: Stray “g” start tag. +51: Stray end tag “g” +55: Stray “g” start tag. +60: Stray end tag “g” +63: Stray “p” start tag. +69: Stray “span” start tag. +#document +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| +| xlink href="foo" + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" + +#data +bar +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" +| "bar" + +#data + +#errors +#document +| +| +| +| + +#data +

a +#errors +#document +| +| +| +|
+| +| "a" + +#data +
a +#errors +#document +| +| +| +|
+| +| +| "a" + +#data +
+#errors +#document +| +| +| +|
+| +| +| + +#data +
a +#errors +#document +| +| +| +|
+| +| +| +| +| "a" + +#data +

a +#errors +#document +| +| +| +|

+| +| +| +|

+| "a" + +#data +
    a +#errors +40: HTML start tag “ul” in a foreign namespace context. +41: End of file in a foreign namespace context. +#document +| +| +| +| +| +| +|
    +| +|
      +| "a" + +#data +
        a +#errors +35: HTML start tag “ul” in a foreign namespace context. +36: End of file in a foreign namespace context. +#document +| +| +| +| +| +| +| +|
          +| "a" + +#data +

          +#errors +#document +| +| +| +| +|

          +| +| +|

          + +#data +

          +#errors +#document +| +| +| +| +|

          +| +| +|

          + +#data +

          +#errors +#document +| +| +| +|

          +| +| +| +|

          +|

          + +#data +
          +#errors +#document +| +| +| +| +| +|
          +| +|
          +| +| + +#data +
          +#errors +#document +| +| +| +| +| +| +| +|
          +|
          +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data +

+#errors +#document +| +| +| +| +|
+| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| + +#data +
+#errors +#document +| +| +| +| +| +| +| +|
+| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat new file mode 100644 index 000000000..638cde479 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat @@ -0,0 +1,482 @@ +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" +| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" +| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" 
+| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributename="" +| attributetype="" +| basefrequency="" +| baseprofile="" +| calcmode="" +| clippathunits="" +| contentscripttype="" +| contentstyletype="" +| diffuseconstant="" +| edgemode="" +| externalresourcesrequired="" +| filterres="" +| filterunits="" +| glyphref="" +| gradienttransform="" +| gradientunits="" +| kernelmatrix="" +| kernelunitlength="" +| keypoints="" +| keysplines="" +| keytimes="" +| lengthadjust="" +| limitingconeangle="" +| markerheight="" +| markerunits="" +| markerwidth="" +| maskcontentunits="" +| maskunits="" +| numoctaves="" +| pathlength="" +| patterncontentunits="" +| patterntransform="" +| patternunits="" +| pointsatx="" +| pointsaty="" +| pointsatz="" +| preservealpha="" +| preserveaspectratio="" +| primitiveunits="" +| refx="" +| refy="" +| repeatcount="" +| repeatdur="" +| requiredextensions="" +| requiredfeatures="" +| specularconstant="" +| specularexponent="" +| spreadmethod="" +| startoffset="" +| stddeviation="" +| stitchtiles="" +| surfacescale="" +| systemlanguage="" +| tablevalues="" +| targetx="" +| targety="" +| textlength="" +| viewbox="" +| viewtarget="" +| xchannelselector="" +| ychannelselector="" +| zoomandpan="" + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat new file mode 100644 index 000000000..63107d277 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat @@ -0,0 +1,62 @@ +#data +

foobazeggs

spam

quuxbar +#errors +#document +| +| +| +| +|

+| "foo" +| +| +| +| "baz" +| +| +| +| +| "eggs" +| +| +|

+| "spam" +| +| +| +|
+| +| +| "quux" +| "bar" + +#data +foobazeggs

spam
quuxbar +#errors +#document +| +| +| +| +| "foo" +| +| +| +| "baz" +| +| +| +| +| "eggs" +| +| +|

+| "spam" +| +| +| +|
+| +| +| "quux" +| "bar" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat new file mode 100644 index 000000000..b8713f885 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat @@ -0,0 +1,74 @@ +#data + +#errors +#document +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +15: Unexpected start tag html +#document +| +| +| abc:def="gh" +| +| +| + +#data + +#errors +15: Unexpected start tag html +#document +| +| +| xml:lang="bar" +| +| + +#data + +#errors +#document +| +| +| 123="456" +| +| + +#data + +#errors +#document +| +| +| 123="456" +| 789="012" +| +| + +#data + +#errors +#document +| +| +| +| +| 789="012" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat new file mode 100644 index 000000000..6ce1c0d16 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat @@ -0,0 +1,208 @@ +#data +

X +#errors +Line: 1 Col: 31 Unexpected end tag (p). Ignored. +Line: 1 Col: 36 Expected closing tag. Unexpected end of file. +#document +| +| +| +| +|

+| +| +| +| +| +| +| " " +|

+| "X" + +#data +

+

X +#errors +Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end tag (p). Ignored. +Line: 2 Col: 4 Expected closing tag. Unexpected end of file. +#document +| +| +| +|

+| +| +| +| +| +| +| " +" +|

+| "X" + +#data + +#errors +Line: 1 Col: 22 Unexpected end tag (html) after the (implied) root element. +#document +| +| +| +| +| " " + +#data + +#errors +Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element. +#document +| +| +| +| +| + +#data + +#errors +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end tag (html) after the (implied) root element. +#document +| +| +| +| + +#data +X +#errors +Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element. +#document +| +| +| +| +| +| "X" + +#data +<!doctype html><table> X<meta></table> +#errors +Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 30 Unexpected start tag (meta) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " X" +| <meta> +| <table> + +#data +<!doctype html><table> x</table> +#errors +Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x" +| <table> + +#data +<!doctype html><table> x </table> +#errors +Line: 1 Col: 25 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x " +| <table> + +#data +<!doctype html><table><tr> x</table> +#errors +Line: 1 Col: 28 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table>X<style> <tr>x </style> </table> +#errors +Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "X" +| <table> +| <style> +| " <tr>x " +| " " + +#data +<!doctype html><div><table><a>foo</a> <tr><td>bar</td> </tr></table></div> +#errors +Line: 1 Col: 30 Unexpected start tag (a) in table context caused voodoo mode. +Line: 1 Col: 37 Unexpected end tag (a) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| <a> +| "foo" +| <table> +| " " +| <tbody> +| <tr> +| <td> +| "bar" +| " " + +#data +<frame></frame></frame><frameset><frame><frameset><frame></frameset><noframes></frameset><noframes> +#errors +6: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”. +13: Stray start tag “frame”. +21: Stray end tag “frame”. +29: Stray end tag “frame”. +39: “frameset” start tag after “body” already open. +105: End of file seen inside an [R]CDATA element. +105: End of file seen and there were open elements. +XXX: These errors are wrong, please fix me! +#document +| <html> +| <head> +| <frameset> +| <frame> +| <frameset> +| <frame> +| <noframes> +| "</frameset><noframes>" + +#data +<!DOCTYPE html><object></html> +#errors +1: Expected closing tag. Unexpected end of file +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <object> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat new file mode 100644 index 000000000..c8ef66f0e --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat @@ -0,0 +1,2299 @@ +#data +<!doctype html><script> +#errors +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script>a +#errors +Line: 1 Col: 24 Unexpected end of file. 
Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "a" +| <body> + +#data +<!doctype html><script>< +#errors +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<" +| <body> + +#data +<!doctype html><script></ +#errors +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</" +| <body> + +#data +<!doctype html><script></S +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</S" +| <body> + +#data +<!doctype html><script></SC +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SC" +| <body> + +#data +<!doctype html><script></SCR +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCR" +| <body> + +#data +<!doctype html><script></SCRI +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRI" +| <body> + +#data +<!doctype html><script></SCRIP +#errors +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRIP" +| <body> + +#data +<!doctype html><script></SCRIPT +#errors +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRIPT" +| <body> + +#data +<!doctype html><script></SCRIPT +#errors +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script></s +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</s" +| <body> + +#data +<!doctype html><script></sc +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</sc" +| <body> + +#data +<!doctype html><script></scr +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scr" +| <body> + +#data +<!doctype html><script></scri +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scri" +| <body> + +#data +<!doctype html><script></scrip +#errors +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scrip" +| <body> + +#data +<!doctype html><script></script +#errors +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</script" +| <body> + +#data +<!doctype html><script></script +#errors +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script><! +#errors +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!" +| <body> + +#data +<!doctype html><script><!a +#errors +Line: 1 Col: 26 Unexpected end of file. 
Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!a" +| <body> + +#data +<!doctype html><script><!- +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!-" +| <body> + +#data +<!doctype html><script><!-a +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!-a" +| <body> + +#data +<!doctype html><script><!-- +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<!doctype html><script><!--a +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--a" +| <body> + +#data +<!doctype html><script><!--< +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<" +| <body> + +#data +<!doctype html><script><!--<a +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<a" +| <body> + +#data +<!doctype html><script><!--</ +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--</" +| <body> + +#data +<!doctype html><script><!--</script +#errors +Line: 1 Col: 35 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--</script" +| <body> + +#data +<!doctype html><script><!--</script +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<!doctype html><script><!--<s +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<s" +| <body> + +#data +<!doctype html><script><!--<script +#errors +Line: 1 Col: 34 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script" +| <body> + +#data +<!doctype html><script><!--<script +#errors +Line: 1 Col: 35 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script " +| <body> + +#data +<!doctype html><script><!--<script < +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script <" +| <body> + +#data +<!doctype html><script><!--<script <a +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script <a" +| <body> + +#data +<!doctype html><script><!--<script </ +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </" +| <body> + +#data +<!doctype html><script><!--<script </s +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </s" +| <body> + +#data +<!doctype html><script><!--<script </script +#errors +Line: 1 Col: 43 Unexpected end of file. Expected end tag (script). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script" +| <body> + +#data +<!doctype html><script><!--<script </scripta +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </scripta" +| <body> + +#data +<!doctype html><script><!--<script </script +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script> +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script>" +| <body> + +#data +<!doctype html><script><!--<script </script/ +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script/" +| <body> + +#data +<!doctype html><script><!--<script </script < +#errors +Line: 1 Col: 45 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script <" +| <body> + +#data +<!doctype html><script><!--<script </script <a +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script <a" +| <body> + +#data +<!doctype html><script><!--<script </script </ +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script </" +| <body> + +#data +<!doctype html><script><!--<script </script </script +#errors +Line: 1 Col: 52 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script </script" +| <body> + +#data +<!doctype html><script><!--<script </script </script +#errors +Line: 1 Col: 53 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script </script/ +#errors +Line: 1 Col: 53 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script </script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script - +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -" +| <body> + +#data +<!doctype html><script><!--<script -a +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -a" +| <body> + +#data +<!doctype html><script><!--<script -< +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -<" +| <body> + +#data +<!doctype html><script><!--<script -- +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --" +| <body> + +#data +<!doctype html><script><!--<script --a +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --a" +| <body> + +#data +<!doctype html><script><!--<script --< +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --<" +| <body> + +#data +<!doctype html><script><!--<script --> +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script -->< +#errors +Line: 1 Col: 39 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --><" +| <body> + +#data +<!doctype html><script><!--<script --></ +#errors +Line: 1 Col: 40 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --></" +| <body> + +#data +<!doctype html><script><!--<script --></script +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --></script" +| <body> + +#data +<!doctype html><script><!--<script --></script +#errors +Line: 1 Col: 47 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script --></script/ +#errors +Line: 1 Col: 47 Unexpected end of file. Expected end tag (script). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script --></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script><\/script>--></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script><\/script>-->" +| <body> + +#data +<!doctype html><script><!--<script></scr'+'ipt>--></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt>-->" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>--><!--</script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>--><!--" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>-- ></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>-- >" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>- -></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- ->" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>- - ></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- - >" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>-></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>->" +| <body> + +#data +<!doctype html><script><!--<script>--!></script>X +#errors +Line: 1 Col: 49 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script>--!></script>X" +| <body> + +#data +<!doctype html><script><!--<scr'+'ipt></script>--></script> +#errors +Line: 1 Col: 59 Unexpected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<scr'+'ipt>" +| <body> +| "-->" + +#data +<!doctype html><script><!--<script></scr'+'ipt></script>X +#errors +Line: 1 Col: 57 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt></script>X" +| <body> + +#data +<!doctype html><style><!--<style></style>--></style> +#errors +Line: 1 Col: 52 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--<style>" +| <body> +| "-->" + +#data +<!doctype html><style><!--</style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--" +| <body> +| "X" + +#data +<!doctype html><style><!--...</style>...--></style> +#errors +Line: 1 Col: 51 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--..." 
+| <body> +| "...-->" + +#data +<!doctype html><style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>" +| <body> +| "X" + +#data +<!doctype html><style><!--...<style><!--...--!></style>--></style> +#errors +Line: 1 Col: 66 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--...<style><!--...--!>" +| <body> +| "-->" + +#data +<!doctype html><style><!--...</style><!-- --><style>@import ...</style> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--..." +| <!-- --> +| <style> +| "@import ..." +| <body> + +#data +<!doctype html><style>...<style><!--...</style><!-- --></style> +#errors +Line: 1 Col: 63 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "...<style><!--..." +| <!-- --> +| <body> + +#data +<!doctype html><style>...<!--[if IE]><style>...</style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "...<!--[if IE]><style>..." +| <body> +| "X" + +#data +<!doctype html><title><!--<title>--> +#errors +Line: 1 Col: 52 Unexpected end tag (title). +#document +| +| +| +| +| "<!--<title>" +| <body> +| "-->" + +#data +<!doctype html><title></title> +#errors +#document +| +| +| +| +| "" +| + +#data +foo/title><link></head><body>X +#errors +Line: 1 Col: 52 Unexpected end of file. Expected end tag (title). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <title> +| "foo/title><link></head><body>X" +| <body> + +#data +<!doctype html><noscript><!--<noscript></noscript>--></noscript> +#errors +Line: 1 Col: 64 Unexpected end tag (noscript). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noscript> +| "<!--<noscript>" +| <body> +| "-->" + +#data +<!doctype html><noscript><!--</noscript>X<noscript>--></noscript> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noscript> +| "<!--" +| <body> +| "X" +| <noscript> +| "-->" + +#data +<!doctype html><noscript><iframe></noscript>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noscript> +| "<iframe>" +| <body> +| "X" + +#data +<!doctype html><noframes><!--<noframes></noframes>--></noframes> +#errors +Line: 1 Col: 64 Unexpected end tag (noframes). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noframes> +| "<!--<noframes>" +| <body> +| "-->" + +#data +<!doctype html><noframes><body><script><!--...</script></body></noframes></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noframes> +| "<body><script><!--...</script></body>" +| <body> + +#data +<!doctype html><textarea><!--<textarea></textarea>--></textarea> +#errors +Line: 1 Col: 64 Unexpected end tag (textarea). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "<!--<textarea>" +| "-->" + +#data +<!doctype html><textarea></textarea></textarea> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "</textarea>" + +#data +<!doctype html><textarea><</textarea> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "<" + +#data +<!doctype html><textarea>a<b</textarea> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "a<b" + +#data +<!doctype html><iframe><!--<iframe></iframe>--></iframe> +#errors +Line: 1 Col: 56 Unexpected end tag (iframe). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> +| "<!--<iframe>" +| "-->" + +#data +<!doctype html><iframe>...<!--X->...<!--/X->...</iframe> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> +| "...<!--X->...<!--/X->..." + +#data +<!doctype html><xmp><!--<xmp></xmp>--></xmp> +#errors +Line: 1 Col: 44 Unexpected end tag (xmp). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <xmp> +| "<!--<xmp>" +| "-->" + +#data +<!doctype html><noembed><!--<noembed></noembed>--></noembed> +#errors +Line: 1 Col: 60 Unexpected end tag (noembed). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <noembed> +| "<!--<noembed>" +| "-->" + +#data +<script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 8 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script>a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 9 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "a" +| <body> + +#data +<script>< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 9 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<" +| <body> + +#data +<script></ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 10 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</" +| <body> + +#data +<script></S +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</S" +| <body> + +#data +<script></SC +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SC" +| <body> + +#data +<script></SCR +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCR" +| <body> + +#data +<script></SCRI +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRI" +| <body> + +#data +<script></SCRIP +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 15 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRIP" +| <body> + +#data +<script></SCRIPT +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRIPT" +| <body> + +#data +<script></SCRIPT +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script></s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</s" +| <body> + +#data +<script></sc +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. 
+Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</sc" +| <body> + +#data +<script></scr +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scr" +| <body> + +#data +<script></scri +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scri" +| <body> + +#data +<script></scrip +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 15 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scrip" +| <body> + +#data +<script></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</script" +| <body> + +#data +<script></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script><! +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 10 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!" +| <body> + +#data +<script><!a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!a" +| <body> + +#data +<script><!- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!-" +| <body> + +#data +<script><!-a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!-a" +| <body> + +#data +<script><!-- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<script><!--a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--a" +| <body> + +#data +<script><!--< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<" +| <body> + +#data +<script><!--<a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<a" +| <body> + +#data +<script><!--</ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--</" +| <body> + +#data +<script><!--</script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end of file. Expected end tag (script). 
+#document +| <html> +| <head> +| <script> +| "<!--</script" +| <body> + +#data +<script><!--</script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<script><!--<s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<s" +| <body> + +#data +<script><!--<script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 19 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script" +| <body> + +#data +<script><!--<script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script " +| <body> + +#data +<script><!--<script < +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script <" +| <body> + +#data +<script><!--<script <a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script <a" +| <body> + +#data +<script><!--<script </ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </" +| <body> + +#data +<script><!--<script </s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </s" +| <body> + +#data +<script><!--<script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script" +| <body> + +#data +<script><!--<script </scripta +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </scripta" +| <body> + +#data +<script><!--<script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script>" +| <body> + +#data +<script><!--<script </script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script/" +| <body> + +#data +<script><!--<script </script < +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). 
+#document +| <html> +| <head> +| <script> +| "<!--<script </script <" +| <body> + +#data +<script><!--<script </script <a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script <a" +| <body> + +#data +<script><!--<script </script </ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script </" +| <body> + +#data +<script><!--<script </script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script </script" +| <body> + +#data +<script><!--<script </script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script </script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script </script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script - +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -" +| <body> + +#data +<script><!--<script -a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -a" +| <body> + +#data +<script><!--<script -- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --" +| <body> + +#data +<script><!--<script --a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --a" +| <body> + +#data +<script><!--<script --> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script -->< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --><" +| <body> + +#data +<script><!--<script --></ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --></" +| <body> + +#data +<script><!--<script --></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. 
Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --></script" +| <body> + +#data +<script><!--<script --></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script --></script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script --></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script><\/script>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script><\/script>-->" +| <body> + +#data +<script><!--<script></scr'+'ipt>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt>-->" +| <body> + +#data +<script><!--<script></script><script></script></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>" +| <body> + +#data +<script><!--<script></script><script></script>--><!--</script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>--><!--" +| <body> + +#data +<script><!--<script></script><script></script>-- ></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>-- >" +| <body> + +#data +<script><!--<script></script><script></script>- -></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- ->" +| <body> + +#data +<script><!--<script></script><script></script>- - ></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- - >" +| <body> + +#data +<script><!--<script></script><script></script>-></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>->" +| <body> + +#data +<script><!--<script>--!></script>X +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 34 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script>--!></script>X" +| <body> + +#data +<script><!--<scr'+'ipt></script>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 44 Unexpected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<scr'+'ipt>" +| <body> +| "-->" + +#data +<script><!--<script></scr'+'ipt></script>X +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 42 Unexpected end of file. Expected end tag (script). 
+#document +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt></script>X" +| <body> + +#data +<style><!--<style></style>--></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 37 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "<!--<style>" +| <body> +| "-->" + +#data +<style><!--</style>X +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "<!--" +| <body> +| "X" + +#data +<style><!--...</style>...--></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 36 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "<!--..." +| <body> +| "...-->" + +#data +<style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>" +| <body> +| "X" + +#data +<style><!--...<style><!--...--!></style>--></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 51 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "<!--...<style><!--...--!>" +| <body> +| "-->" + +#data +<style><!--...</style><!-- --><style>@import ...</style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "<!--..." +| <!-- --> +| <style> +| "@import ..." +| <body> + +#data +<style>...<style><!--...</style><!-- --></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 48 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "...<style><!--..." +| <!-- --> +| <body> + +#data +<style>...<!--[if IE]><style>...</style>X +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "...<!--[if IE]><style>..." +| <body> +| "X" + +#data +<title><!--<title>--> +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +Line: 1 Col: 37 Unexpected end tag (title). +#document +| +| +| +| "<!--<title>" +| <body> +| "-->" + +#data +<title></title> +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +#document +| +| +| +| "" +| + +#data +foo/title><link></head><body>X +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +Line: 1 Col: 37 Unexpected end of file. Expected end tag (title). +#document +| <html> +| <head> +| <title> +| "foo/title><link></head><body>X" +| <body> + +#data +<noscript><!--<noscript></noscript>--></noscript> +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +Line: 1 Col: 49 Unexpected end tag (noscript). +#document +| <html> +| <head> +| <noscript> +| "<!--<noscript>" +| <body> +| "-->" + +#data +<noscript><!--</noscript>X<noscript>--></noscript> +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +#document +| <html> +| <head> +| <noscript> +| "<!--" +| <body> +| "X" +| <noscript> +| "-->" + +#data +<noscript><iframe></noscript>X +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +#document +| <html> +| <head> +| <noscript> +| "<iframe>" +| <body> +| "X" + +#data +<noframes><!--<noframes></noframes>--></noframes> +#errors +Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE. +Line: 1 Col: 49 Unexpected end tag (noframes). 
+#document +| <html> +| <head> +| <noframes> +| "<!--<noframes>" +| <body> +| "-->" + +#data +<noframes><body><script><!--...</script></body></noframes></html> +#errors +Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE. +#document +| <html> +| <head> +| <noframes> +| "<body><script><!--...</script></body>" +| <body> + +#data +<textarea><!--<textarea></textarea>--></textarea> +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +Line: 1 Col: 49 Unexpected end tag (textarea). +#document +| <html> +| <head> +| <body> +| <textarea> +| "<!--<textarea>" +| "-->" + +#data +<textarea></textarea></textarea> +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| <textarea> +| "</textarea>" + +#data +<iframe><!--<iframe></iframe>--></iframe> +#errors +Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE. +Line: 1 Col: 41 Unexpected end tag (iframe). +#document +| <html> +| <head> +| <body> +| <iframe> +| "<!--<iframe>" +| "-->" + +#data +<iframe>...<!--X->...<!--/X->...</iframe> +#errors +Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| <iframe> +| "...<!--X->...<!--/X->..." + +#data +<xmp><!--<xmp></xmp>--></xmp> +#errors +Line: 1 Col: 5 Unexpected start tag (xmp). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end tag (xmp). +#document +| <html> +| <head> +| <body> +| <xmp> +| "<!--<xmp>" +| "-->" + +#data +<noembed><!--<noembed></noembed>--></noembed> +#errors +Line: 1 Col: 9 Unexpected start tag (noembed). Expected DOCTYPE. +Line: 1 Col: 45 Unexpected end tag (noembed). +#document +| <html> +| <head> +| <body> +| <noembed> +| "<!--<noembed>" +| "-->" + +#data +<!doctype html><table> + +#errors +Line 2 Col 0 Unexpected end of file. Expected table content. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| " +" + +#data +<!doctype html><table><td><span><font></span><span> +#errors +Line 1 Col 26 Unexpected table cell start tag (td) in the table body phase. +Line 1 Col 45 Unexpected end tag (span). +Line 1 Col 51 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <span> +| <font> +| <font> +| <span> + +#data +<!doctype html><form><table></form><form></table></form> +#errors +35: Stray end tag “form”. +41: Start tag “form” seen in “table”. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <table> +| <form> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat new file mode 100644 index 000000000..7b555f888 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat @@ -0,0 +1,153 @@ +#data +<!doctype html><table><tbody><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><tr><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <table> +| <tbody> +| <tr> +| <td> + +#data +<!doctype html><table><tr><td><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <select> +| <td> + +#data +<!doctype html><table><tr><th><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <th> +| <select> +| <td> + +#data +<!doctype html><table><caption><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <select> +| <tbody> +| <tr> + +#data +<!doctype html><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><th> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><tbody> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><thead> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><tfoot> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><caption> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><table><tr></table>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| "a" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat new file mode 100644 index 000000000..680e1f068 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat @@ -0,0 +1,269 @@ +#data +<!doctype html><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> + +#data +<!doctype html><table><tbody><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> + +#data +<!doctype html><table><tbody><tr><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><tbody><tr><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><td><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> 
+| <tr> +| <td> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><caption><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><tr><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <tbody> +| <tr> +| <style> +| "</script>" + +#data +<!doctype html><table><tr><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <tbody> +| <tr> +| <script> +| "</style>" + +#data +<!doctype html><table><caption><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <style> +| "</script>" +| "abc" + +#data +<!doctype html><table><td><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <style> +| "</script>" +| "abc" + +#data +<!doctype html><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" + +#data +<!doctype html><table><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" +| <table> + +#data +<!doctype html><table><tr><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><frameset></frameset><noframes>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" + +#data +<!doctype html><frameset></frameset><noframes>abc</noframes><!--abc--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" +| <!-- abc --> + +#data +<!doctype html><frameset></frameset></html><noframes>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" + +#data +<!doctype html><frameset></frameset></html><noframes>abc</noframes><!--abc--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" +| <!-- abc --> + +#data +<!doctype html><table><tr></tbody><tfoot> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <tfoot> + +#data +<!doctype html><table><td><svg></svg>abc<td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <svg svg> +| "abc" +| <td> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat new file mode 100644 index 000000000..0d62f5a5b --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat @@ -0,0 +1,1237 @@ +#data +<!doctype html><math><mn DefinitionUrl="foo"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mn> +| definitionURL="foo" + +#data +<!doctype html><html></p><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <!-- foo --> +| <head> +| <body> + +#data +<!doctype html><head></head></p><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <!-- foo --> +| <body> + +#data +<!doctype html><body><p><pre> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <pre> + +#data 
+<!doctype html><body><p><listing> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <listing> + +#data +<!doctype html><p><plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <plaintext> + +#data +<!doctype html><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <h1> + +#data +<!doctype html><form><isindex> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> + +#data +<!doctype html><isindex action="POST"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| action="POST" +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><isindex prompt="this is isindex"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "this is isindex" +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><isindex type="hidden"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| type="hidden" +| <hr> + +#data +<!doctype html><isindex name="foo"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><ruby><p><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <p> +| <rp> + +#data +<!doctype html><ruby><div><span><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <span> +| <rp> + +#data +<!doctype html><ruby><div><p><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <p> +| <rp> + +#data +<!doctype html><ruby><p><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <p> +| <rt> + +#data +<!doctype html><ruby><div><span><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <span> +| <rt> + +#data +<!doctype html><ruby><div><p><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <p> +| <rt> + +#data +<!doctype html><math/><foo> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <foo> + +#data +<!doctype html><svg/><foo> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <svg svg> +| <foo> + +#data +<!doctype html><div></body><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| <!-- foo --> + +#data +<!doctype html><h1><div><h3><span></h1>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <h1> +| <div> +| <h3> +| <span> +| "foo" + +#data +<!doctype html><p></h3>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "foo" + +#data +<!doctype html><h3><li>abc</h2>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <h3> +| <li> +| "abc" +| "foo" + +#data +<!doctype html><table>abc<!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <!-- foo --> + +#data +<!doctype html><table> <!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| " " +| <!-- foo --> + +#data +<!doctype html><table> b <!--foo--> +#errors +#document +| <!DOCTYPE html> +| 
<html> +| <head> +| <body> +| " b " +| <table> +| <!-- foo --> + +#data +<!doctype html><select><option><option> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> +| <option> + +#data +<!doctype html><select><option></optgroup> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> + +#data +<!doctype html><select><option></optgroup> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> + +#data +<!doctype html><p><math><mi><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mi> +| <p> +| <h1> + +#data +<!doctype html><p><math><mo><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mo> +| <p> +| <h1> + +#data +<!doctype html><p><math><mn><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mn> +| <p> +| <h1> + +#data +<!doctype html><p><math><ms><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math ms> +| <p> +| <h1> + +#data +<!doctype html><p><math><mtext><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mtext> +| <p> +| <h1> + +#data +<!doctype html><frameset></noframes> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html c=d><body></html><html a=b> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <body> + +#data +<!doctype html><html c=d><frameset></frameset></html><html a=b> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <!-- foo --> + +#data +<!doctype html><html><frameset></frameset></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| " " + +#data +<!doctype html><html><frameset></frameset></html>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html><p> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html></p> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<html><frameset></frameset></html><!doctype html> +#errors +#document +| <html> +| <head> +| <frameset> + +#data +<!doctype html><body><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!doctype html><p><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><p>a<frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "a" + +#data +<!doctype html><p> <frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><pre><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <pre> + +#data +<!doctype html><listing><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <listing> + +#data +<!doctype html><li><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <li> + +#data +<!doctype html><dd><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| 
<body> +| <dd> + +#data +<!doctype html><dt><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <dt> + +#data +<!doctype html><button><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <button> + +#data +<!doctype html><applet><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <applet> + +#data +<!doctype html><marquee><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <marquee> + +#data +<!doctype html><object><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <object> + +#data +<!doctype html><table><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> + +#data +<!doctype html><area><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <area> + +#data +<!doctype html><basefont><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <basefont> +| <frameset> + +#data +<!doctype html><bgsound><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <bgsound> +| <frameset> + +#data +<!doctype html><br><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <br> + +#data +<!doctype html><embed><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <embed> + +#data +<!doctype html><img><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <img> + +#data +<!doctype html><input><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <input> + +#data +<!doctype html><keygen><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <keygen> + +#data +<!doctype html><wbr><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <wbr> + +#data +<!doctype html><hr><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <hr> + +#data +<!doctype html><textarea></textarea><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> + +#data +<!doctype html><xmp></xmp><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <xmp> + +#data +<!doctype html><iframe></iframe><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> + +#data +<!doctype html><select></select><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><svg></svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><math></math><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><svg><foreignObject><div> <frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><svg>a</svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <svg svg> +| "a" + +#data +<!doctype html><svg> </svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<html>aaa<frameset></frameset> +#errors +#document +| <html> +| <head> +| <body> +| "aaa" + +#data +<html> a <frameset></frameset> +#errors +#document +| <html> +| <head> +| <body> +| "a " + +#data +<!doctype html><div><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data 
+<!doctype html><div><body><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> + +#data +<!doctype html><p><math></p>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| "a" + +#data +<!doctype html><p><math><mn><span></p>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mn> +| <span> +| <p> +| "a" + +#data +<!doctype html><math></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> + +#data +<!doctype html><meta charset="ascii"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <meta> +| charset="ascii" +| <body> + +#data +<!doctype html><meta http-equiv="content-type" content="text/html;charset=ascii"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <meta> +| content="text/html;charset=ascii" +| http-equiv="content-type" +| <body> + +#data +<!doctype html><head><!--aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa--><meta charset="utf8"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <!-- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa --> +| <meta> +| charset="utf8" +| <body> + +#data +<!doctype html><html a=b><head></head><html c=d> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <body> + +#data +<!doctype html><image/> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <img> + +#data +<!doctype html>a<i>b<table>c<b>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "a" +| <i> +| "bc" +| <b> +| "de" +| "f" +| <table> + +#data +<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" +| <table> + +#data +<!doctype html><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" + +#data +<!doctype html><table><i>a<b>b<div>c</i> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <i> +| "c" +| <table> + +#data +<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" +| <table> + +#data +<!doctype html><table><i>a<div>b<tr>c<b>d</i>e +#errors 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <div> +| "b" +| <i> +| "c" +| <b> +| "d" +| <b> +| "e" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><td><table><i>a<div>b<b>c</i>d +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <i> +| "a" +| <div> +| <i> +| "b" +| <b> +| "c" +| <b> +| "d" +| <table> + +#data +<!doctype html><body><bgsound> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <bgsound> + +#data +<!doctype html><body><basefont> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <basefont> + +#data +<!doctype html><a><b></a><basefont> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <a> +| <b> +| <basefont> + +#data +<!doctype html><a><b></a><bgsound> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <a> +| <b> +| <bgsound> + +#data +<!doctype html><figcaption><article></figcaption>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <figcaption> +| <article> +| "a" + +#data +<!doctype html><summary><article></summary>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <summary> +| <article> +| "a" + +#data +<!doctype html><p><a><plaintext>b +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <a> +| <plaintext> +| <a> +| "b" + +#data +<!DOCTYPE html><div>a<a></div>b<p>c</p>d +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| "a" +| <a> +| <a> +| "b" +| <p> +| "c" +| "d" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat new file mode 100644 index 000000000..60d859221 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat @@ -0,0 +1,763 @@ +#data +<!DOCTYPE html>Test +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "Test" + +#data +<textarea>test</div>test +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +Line: 1 Col: 24 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <textarea> +| "test</div>test" + +#data +<table><td> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase. +Line: 1 Col: 11 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> + +#data +<table><td>test</tbody></table> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| "test" + +#data +<frame>test +#errors +Line: 1 Col: 7 Unexpected start tag (frame). Expected DOCTYPE. +Line: 1 Col: 7 Unexpected start tag frame. Ignored. +#document +| <html> +| <head> +| <body> +| "test" + +#data +<!DOCTYPE html><frameset>test +#errors +Line: 1 Col: 29 Unepxected characters in the frameset phase. Characters ignored. +Line: 1 Col: 29 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!DOCTYPE html><frameset><!DOCTYPE html> +#errors +Line: 1 Col: 40 Unexpected DOCTYPE. Ignored. +Line: 1 Col: 40 Expected closing tag. Unexpected end of file. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!DOCTYPE html><font><p><b>test</font> +#errors +Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm. +Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <font> +| <p> +| <font> +| <b> +| "test" + +#data +<!DOCTYPE html><dt><div><dd> +#errors +Line: 1 Col: 28 Missing end tag (div, dt). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <dt> +| <div> +| <dd> + +#data +<script></x +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</x" +| <body> + +#data +<table><plaintext><td> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 18 Unexpected start tag (plaintext) in table context caused voodoo mode. +Line: 1 Col: 22 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| <plaintext> +| "<td>" +| <table> + +#data +<plaintext></plaintext> +#errors +Line: 1 Col: 11 Unexpected start tag (plaintext). Expected DOCTYPE. +Line: 1 Col: 23 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" + +#data +<!DOCTYPE html><table><tr>TEST +#errors +Line: 1 Col: 30 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 30 Unexpected end of file. Expected table content. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "TEST" +| <table> +| <tbody> +| <tr> + +#data +<!DOCTYPE html><body t1=1><body t2=2><body t3=3 t4=4> +#errors +Line: 1 Col: 37 Unexpected start tag (body). +Line: 1 Col: 53 Unexpected start tag (body). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| t1="1" +| t2="2" +| t3="3" +| t4="4" + +#data +</b test +#errors +Line: 1 Col: 8 Unexpected end of file in attribute name. +Line: 1 Col: 8 End tag contains unexpected attributes. +Line: 1 Col: 8 Unexpected end tag (b). Expected DOCTYPE. +Line: 1 Col: 8 Unexpected end tag (b) after the (implied) root element. +#document +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html></b test<b &=&>X +#errors +Line: 1 Col: 32 Named entity didn't end with ';'. +Line: 1 Col: 33 End tag contains unexpected attributes. +Line: 1 Col: 33 Unexpected end tag (b) after the (implied) root element. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "X" + +#data +<!doctypehtml><scrIPt type=text/x-foobar;baz>X</SCRipt +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +Line: 1 Col: 54 Unexpected end of file in the tag name. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| type="text/x-foobar;baz" +| "X</SCRipt" +| <body> + +#data +& +#errors +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&" + +#data +&# +#errors +Line: 1 Col: 1 Numeric entity expected. Got end of file instead. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#" + +#data +&#X +#errors +Line: 1 Col: 3 Numeric entity expected but none found. +Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#X" + +#data +&#x +#errors +Line: 1 Col: 3 Numeric entity expected but none found. +Line: 1 Col: 3 Unexpected non-space characters. 
Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#x" + +#data +- +#errors +Line: 1 Col: 4 Numeric entity didn't end with ';'. +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "-" + +#data +&x-test +#errors +Line: 1 Col: 1 Named entity expected. Got none. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&x-test" + +#data +<!doctypehtml><p><li> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <li> + +#data +<!doctypehtml><p><dt> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <dt> + +#data +<!doctypehtml><p><dd> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <dd> + +#data +<!doctypehtml><p><form> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +Line: 1 Col: 23 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <form> + +#data +<!DOCTYPE html><p></P>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "X" + +#data +& +#errors +Line: 1 Col: 4 Named entity didn't end with ';'. +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&" + +#data +&AMp; +#errors +Line: 1 Col: 1 Named entity expected. Got none. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&AMp;" + +#data +<!DOCTYPE html><html><head></head><body><thisISasillyTESTelementNameToMakeSureCrazyTagNamesArePARSEDcorrectLY> +#errors +Line: 1 Col: 110 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <thisisasillytestelementnametomakesurecrazytagnamesareparsedcorrectly> + +#data +<!DOCTYPE html>X</body>X +#errors +Line: 1 Col: 24 Unexpected non-space characters in the after body phase. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "XX" + +#data +<!DOCTYPE html><!-- X +#errors +Line: 1 Col: 21 Unexpected end of file in comment. +#document +| <!DOCTYPE html> +| <!-- X --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><table><caption>test TEST</caption><td>test +#errors +Line: 1 Col: 54 Unexpected table cell start tag (td) in the table body phase. +Line: 1 Col: 58 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| "test TEST" +| <tbody> +| <tr> +| <td> +| "test" + +#data +<!DOCTYPE html><select><option><optgroup> +#errors +Line: 1 Col: 41 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> +| <optgroup> + +#data +<!DOCTYPE html><select><optgroup><option></optgroup><option><select><option> +#errors +Line: 1 Col: 68 Unexpected select start tag in the select phase treated as select end tag. +Line: 1 Col: 76 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> +| <option> +| <option> +| <option> + +#data +<!DOCTYPE html><select><optgroup><option><optgroup> +#errors +Line: 1 Col: 51 Expected closing tag. Unexpected end of file. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> +| <option> +| <optgroup> + +#data +<!DOCTYPE html><datalist><option>foo</datalist>bar +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <datalist> +| <option> +| "foo" +| "bar" + +#data +<!DOCTYPE html><font><input><input></font> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <font> +| <input> +| <input> + +#data +<!DOCTYPE html><!-- XXX - XXX --> +#errors +#document +| <!DOCTYPE html> +| <!-- XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><!-- XXX - XXX +#errors +Line: 1 Col: 29 Unexpected end of file in comment (-) +#document +| <!DOCTYPE html> +| <!-- XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><!-- XXX - XXX - XXX --> +#errors +#document +| <!DOCTYPE html> +| <!-- XXX - XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<isindex test=x name=x> +#errors +Line: 1 Col: 23 Unexpected start tag (isindex). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected start tag isindex. Don't use it! +#document +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| test="x" +| <hr> + +#data +test +test +#errors +Line: 2 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "test +test" + +#data +<!DOCTYPE html><body><title>test</body> +#errors +#document +| +| +| +| +| +| "test</body>" + +#data +<!DOCTYPE html><body><title>X +#errors +#document +| +| +| +| +| +| "X" +| <meta> +| name="z" +| <link> +| rel="foo" +| <style> +| " +x { content:"</style" } " + +#data +<!DOCTYPE html><select><optgroup></optgroup></select> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> + +#data + + +#errors +Line: 2 Col: 1 Unexpected End of file. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html> <html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><script> +</script> <title>x +#errors +#document +| +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected start tag (script) that can be in head. Moved. +#document +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +Line: 1 Col: 28 Unexpected start tag (style) that can be in head. Moved. +#document +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +#document +| +| +| +| +| "x" +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (style). +#document +| +| +| --> x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +|

+#errors +#document +| +| +| +| +| +| ddd +#errors +#document +| +| +| +#errors +#document +| +| +| +| +|
  • +| +| ", + " +
    << Back to Go HTTP/2 demo server`) + }) +} + +func httpsHost() string { + if *hostHTTPS != "" { + return *hostHTTPS + } + if v := *httpsAddr; strings.HasPrefix(v, ":") { + return "localhost" + v + } else { + return v + } +} + +func httpHost() string { + if *hostHTTP != "" { + return *hostHTTP + } + if v := *httpAddr; strings.HasPrefix(v, ":") { + return "localhost" + v + } else { + return v + } +} + +func serveProdTLS(autocertManager *autocert.Manager) error { + srv := &http.Server{ + TLSConfig: &tls.Config{ + GetCertificate: autocertManager.GetCertificate, + }, + } + http2.ConfigureServer(srv, &http2.Server{ + NewWriteScheduler: func() http2.WriteScheduler { + return http2.NewPriorityWriteScheduler(nil) + }, + }) + ln, err := net.Listen("tcp", ":443") + if err != nil { + return err + } + return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig)) +} + +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +func serveProd() error { + log.Printf("running in production mode") + + storageClient, err := storage.NewClient(context.Background()) + if err != nil { + log.Fatalf("storage.NewClient: %v", err) + } + autocertManager := &autocert.Manager{ + Prompt: autocert.AcceptTOS, + HostPolicy: autocert.HostWhitelist("http2.golang.org"), + Cache: autocertcache.NewGoogleCloudStorageCache(storageClient, "golang-h2demo-autocert"), + } + + errc := make(chan error, 2) + go func() { errc <- http.ListenAndServe(":80", autocertManager.HTTPHandler(http.DefaultServeMux)) }() + go func() { errc <- serveProdTLS(autocertManager) }() + return <-errc +} + +const idleTimeout = 5 * time.Minute +const activeTimeout = 10 * time.Minute + +// TODO: put this into the standard library and actually send +// PING frames and GOAWAY, etc: golang.org/issue/14204 +func idleTimeoutHook() func(net.Conn, http.ConnState) { + var mu sync.Mutex + m := map[net.Conn]*time.Timer{} + return func(c net.Conn, cs http.ConnState) { + mu.Lock() + defer mu.Unlock() + if t, ok := m[c]; ok { + delete(m, c) + t.Stop() + } + var d time.Duration + switch cs { + case http.StateNew, http.StateIdle: + d = idleTimeout + case http.StateActive: + d = activeTimeout + default: + return + } + m[c] = time.AfterFunc(d, func() { + log.Printf("closing idle conn %v after %v", c.RemoteAddr(), d) + go c.Close() + }) + } +} + +func main() { + var srv http.Server + flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.") + flag.Parse() + srv.Addr = *httpsAddr + srv.ConnState = idleTimeoutHook() + + registerHandlers() + + if *prod { + *hostHTTP = "http2.golang.org" + *hostHTTPS = "http2.golang.org" + log.Fatal(serveProd()) + } + + url := "https://" + httpsHost() + "/" + log.Printf("Listening on " + url) + http2.ConfigureServer(&srv, &http2.Server{}) + + if *httpAddr != "" { + go func() { + log.Printf("Listening on http://" + httpHost() + "/ (for unencrypted HTTP/1)") + log.Fatal(http.ListenAndServe(*httpAddr, nil)) + }() + } + + go func() { + log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key")) + }() + select {} +} diff --git a/vendor/golang.org/x/net/http2/h2demo/launch.go b/vendor/golang.org/x/net/http2/h2demo/launch.go new file mode 100644 index 000000000..df0866a30 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/launch.go @@ -0,0 +1,302 @@ +// Copyright 2014 The Go 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" +) + +var ( + proj = flag.String("project", "symbolic-datum-552", "name of Project") + zone = flag.String("zone", "us-central1-a", "GCE zone") + mach = flag.String("machinetype", "n1-standard-1", "Machine type") + instName = flag.String("instance_name", "http2-demo", "Name of VM instance.") + sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.") + staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.") + + writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.") + publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.") +) + +func readFile(v string) string { + slurp, err := ioutil.ReadFile(v) + if err != nil { + log.Fatalf("Error reading %s: %v", v, err) + } + return strings.TrimSpace(string(slurp)) +} + +var config = &oauth2.Config{ + // The client-id and secret should be for an "Installed Application" when using + // the CLI. Later we'll use a web application with a callback. + ClientID: readFile("client-id.dat"), + ClientSecret: readFile("client-secret.dat"), + Endpoint: google.Endpoint, + Scopes: []string{ + compute.DevstorageFullControlScope, + compute.ComputeScope, + "https://www.googleapis.com/auth/sqlservice", + "https://www.googleapis.com/auth/sqlservice.admin", + }, + RedirectURL: "urn:ietf:wg:oauth:2.0:oob", +} + +const baseConfig = `#cloud-config +coreos: + units: + - name: h2demo.service + command: start + content: | + [Unit] + Description=HTTP2 Demo + + [Service] + ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo' + ExecStart=/opt/bin/h2demo --prod + RestartSec=5s + Restart=always + Type=simple + + [Install] + WantedBy=multi-user.target +` + +func main() { + flag.Parse() + if *proj == "" { + log.Fatalf("Missing --project flag") + } + prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj + machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach + + const tokenFileName = "token.dat" + tokenFile := tokenCacheFile(tokenFileName) + tokenSource := oauth2.ReuseTokenSource(nil, tokenFile) + token, err := tokenSource.Token() + if err != nil { + if *writeObject != "" { + log.Fatalf("Can't use --write_object without a valid token.dat file already cached.") + } + log.Printf("Error getting token from %s: %v", tokenFileName, err) + log.Printf("Get auth code from %v", config.AuthCodeURL("my-state")) + fmt.Print("\nEnter auth code: ") + sc := bufio.NewScanner(os.Stdin) + sc.Scan() + authCode := strings.TrimSpace(sc.Text()) + token, err = config.Exchange(oauth2.NoContext, authCode) + if err != nil { + log.Fatalf("Error exchanging auth code for a token: %v", err) + } + if err := tokenFile.WriteToken(token); err != nil { + log.Fatalf("Error writing to %s: %v", tokenFileName, err) + } + tokenSource = oauth2.ReuseTokenSource(token, nil) + } + + oauthClient := 
oauth2.NewClient(oauth2.NoContext, tokenSource) + + if *writeObject != "" { + writeCloudStorageObject(oauthClient) + return + } + + computeService, _ := compute.New(oauthClient) + + natIP := *staticIP + if natIP == "" { + // Try to find it by name. + aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do() + if err != nil { + log.Fatal(err) + } + // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList + IPLoop: + for _, asl := range aggAddrList.Items { + for _, addr := range asl.Addresses { + if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" { + natIP = addr.Address + break IPLoop + } + } + } + } + + cloudConfig := baseConfig + if *sshPub != "" { + key := strings.TrimSpace(readFile(*sshPub)) + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key) + } + if os.Getenv("USER") == "bradfitz" { + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com") + } + const maxCloudConfig = 32 << 10 // per compute API docs + if len(cloudConfig) > maxCloudConfig { + log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig) + } + + instance := &compute.Instance{ + Name: *instName, + Description: "Go Builder", + MachineType: machType, + Disks: []*compute.AttachedDisk{instanceDisk(computeService)}, + Tags: &compute.Tags{ + Items: []string{"http-server", "https-server"}, + }, + Metadata: &compute.Metadata{ + Items: []*compute.MetadataItems{ + { + Key: "user-data", + Value: &cloudConfig, + }, + }, + }, + NetworkInterfaces: []*compute.NetworkInterface{ + { + AccessConfigs: []*compute.AccessConfig{ + { + Type: "ONE_TO_ONE_NAT", + Name: "External NAT", + NatIP: natIP, + }, + }, + Network: prefix + "/global/networks/default", + }, + }, + ServiceAccounts: []*compute.ServiceAccount{ + { + Email: "default", + Scopes: []string{ + compute.DevstorageFullControlScope, + compute.ComputeScope, + }, + }, + }, + } + + log.Printf("Creating instance...") + op, err := computeService.Instances.Insert(*proj, *zone, instance).Do() + if err != nil { + log.Fatalf("Failed to create instance: %v", err) + } + opName := op.Name + log.Printf("Created. Waiting on operation %v", opName) +OpLoop: + for { + time.Sleep(2 * time.Second) + op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do() + if err != nil { + log.Fatalf("Failed to get op %s: %v", opName, err) + } + switch op.Status { + case "PENDING", "RUNNING": + log.Printf("Waiting on operation %v", opName) + continue + case "DONE": + if op.Error != nil { + for _, operr := range op.Error.Errors { + log.Printf("Error: %+v", operr) + } + log.Fatalf("Failed to start.") + } + log.Printf("Success. 
%+v", op) + break OpLoop + default: + log.Fatalf("Unknown status %q: %+v", op.Status, op) + } + } + + inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do() + if err != nil { + log.Fatalf("Error getting instance after creation: %v", err) + } + ij, _ := json.MarshalIndent(inst, "", " ") + log.Printf("Instance: %s", ij) +} + +func instanceDisk(svc *compute.Service) *compute.AttachedDisk { + const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" + diskName := *instName + "-disk" + + return &compute.AttachedDisk{ + AutoDelete: true, + Boot: true, + Type: "PERSISTENT", + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskName: diskName, + SourceImage: imageURL, + DiskSizeGb: 50, + }, + } +} + +func writeCloudStorageObject(httpClient *http.Client) { + content := os.Stdin + const maxSlurp = 1 << 20 + var buf bytes.Buffer + n, err := io.CopyN(&buf, content, maxSlurp) + if err != nil && err != io.EOF { + log.Fatalf("Error reading from stdin: %v, %v", n, err) + } + contentType := http.DetectContentType(buf.Bytes()) + + req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content)) + if err != nil { + log.Fatal(err) + } + req.Header.Set("x-goog-api-version", "2") + if *publicObject { + req.Header.Set("x-goog-acl", "public-read") + } + req.Header.Set("Content-Type", contentType) + res, err := httpClient.Do(req) + if err != nil { + log.Fatal(err) + } + if res.StatusCode != 200 { + res.Write(os.Stderr) + log.Fatalf("Failed.") + } + log.Printf("Success.") + os.Exit(0) +} + +type tokenCacheFile string + +func (f tokenCacheFile) Token() (*oauth2.Token, error) { + slurp, err := ioutil.ReadFile(string(f)) + if err != nil { + return nil, err + } + t := new(oauth2.Token) + if err := json.Unmarshal(slurp, t); err != nil { + return nil, err + } + return t, nil +} + +func (f tokenCacheFile) WriteToken(t *oauth2.Token) error { + jt, err := json.Marshal(t) + if err != nil { + return err + } + return ioutil.WriteFile(string(f), jt, 0600) +} diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.key b/vendor/golang.org/x/net/http2/h2demo/rootCA.key new file mode 100644 index 000000000..a15a6abaf --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q +62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby +XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV +mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ +JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ +SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA +nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e +/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx +qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser +hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j +NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E +LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7 +8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c +0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws +K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd +bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo +QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt 
+Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1 +nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy +b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7 +gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev +WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr +C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj +x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA +hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y +-----END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem new file mode 100644 index 000000000..3a323e774 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG +A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 +DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 +NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG +cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv +c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS +R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT +ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk +JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 +mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW +caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G +A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt +hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB +MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES +MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv +bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h +U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao +eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 +UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD +58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n +sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF +kPe6XoSbiLm/kxk32T0= +-----END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl new file mode 100644 index 000000000..6db389188 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl @@ -0,0 +1 @@ +E2CE26BF3285059C diff --git a/vendor/golang.org/x/net/http2/h2demo/server.crt b/vendor/golang.org/x/net/http2/h2demo/server.crt new file mode 100644 index 000000000..c59059bd6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/server.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV +UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT +C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW +DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow +RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE +ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l +gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2 +dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL 
+A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws +/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88 +F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB +AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R +rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD +EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19 +KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI +dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU +90p6/CbU71bGbfpM2PHot2fm +-----END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/server.key b/vendor/golang.org/x/net/http2/h2demo/server.key new file mode 100644 index 000000000..f329c1421 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi +fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm +J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef +b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55 +mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/ +fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p +3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3 +qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4 +NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80 +LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN +a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+ +Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL +W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO +gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm +S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS +Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp +V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4 +KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4 +yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5 +drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e +ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R +48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5 +c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY +nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl +IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd +-----END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/service.yaml b/vendor/golang.org/x/net/http2/h2demo/service.yaml new file mode 100644 index 000000000..2b7d54119 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: h2demo +spec: + externalTrafficPolicy: Local + ports: + - port: 80 + targetPort: 80 + name: http + - port: 443 + targetPort: 443 + name: https + selector: + app: h2demo + type: LoadBalancer + loadBalancerIP: 130.211.116.44 diff --git a/vendor/golang.org/x/net/http2/h2demo/tmpl.go b/vendor/golang.org/x/net/http2/h2demo/tmpl.go new file mode 100644 index 000000000..504d6a78a --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/tmpl.go @@ -0,0 +1,1991 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build h2demo + +package main + +import "html/template" + +var pushTmpl = template.Must(template.New("serverpush").Parse(` + + + + + + + + + HTTP/2 Server Push Demo + + + + + + + + + +
    +Note: This page exists for demonstration purposes. For the actual cmd/go docs, go to golang.org/cmd/go. +

    Command go


    +Go is a tool for managing Go source code. +

    +

    +Usage: +

    +
    go command [arguments]
    +
    +

    +The commands are: +

    +
    build       compile packages and dependencies
    +clean       remove object files
    +doc         show documentation for package or symbol
    +env         print Go environment information
    +bug         start a bug report
    +fix         run go tool fix on packages
    +fmt         run gofmt on package sources
    +generate    generate Go files by processing source
    +get         download and install packages and dependencies
    +install     compile and install packages and dependencies
    +list        list packages
    +run         compile and run Go program
    +test        test packages
    +tool        run specified go tool
    +version     print Go version
    +vet         run go tool vet on packages
    +
    +

    +Use "go help [command]" for more information about a command. +

    +

    +Additional help topics: +

    +
    c           calling between Go and C
    +buildmode   description of build modes
    +filetype    file types
    +gopath      GOPATH environment variable
    +environment environment variables
    +importpath  import path syntax
    +packages    description of package lists
    +testflag    description of testing flags
    +testfunc    description of testing functions
    +
    +

    +Use "go help [topic]" for more information about that topic. +

    +

    Compile packages and dependencies

    +

    +Usage: +

    +
    go build [-o output] [-i] [build flags] [packages]
    +
    +

    +Build compiles the packages named by the import paths, +along with their dependencies, but it does not install the results. +

    +

    +If the arguments to build are a list of .go files, build treats +them as a list of source files specifying a single package. +

    +

    +When compiling a single main package, build writes +the resulting executable to an output file named after +the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe') +or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe'). +The '.exe' suffix is added when writing a Windows executable. +

    +

    +When compiling multiple packages or a single non-main package, +build compiles the packages but discards the resulting object, +serving only as a check that the packages can be built. +

    +

    +When compiling packages, build ignores files that end in '_test.go'. +

    +

    +The -o flag, only allowed when compiling a single package, +forces build to write the resulting executable or object +to the named output file, instead of the default behavior described +in the last two paragraphs. +

    +

    +The -i flag installs the packages that are dependencies of the target. +

    +

    +The build flags are shared by the build, clean, get, install, list, run, +and test commands: +

    +
    -a
    +	force rebuilding of packages that are already up-to-date.
    +-n
    +	print the commands but do not run them.
    +-p n
    +	the number of programs, such as build commands or
    +	test binaries, that can be run in parallel.
    +	The default is the number of CPUs available.
    +-race
    +	enable data race detection.
    +	Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64.
    +-msan
    +	enable interoperation with memory sanitizer.
    +	Supported only on linux/amd64,
    +	and only with Clang/LLVM as the host C compiler.
    +-v
    +	print the names of packages as they are compiled.
    +-work
    +	print the name of the temporary work directory and
    +	do not delete it when exiting.
    +-x
    +	print the commands.
    +
    +-asmflags 'flag list'
    +	arguments to pass on each go tool asm invocation.
    +-buildmode mode
    +	build mode to use. See 'go help buildmode' for more.
    +-compiler name
    +	name of compiler to use, as in runtime.Compiler (gccgo or gc).
    +-gccgoflags 'arg list'
    +	arguments to pass on each gccgo compiler/linker invocation.
    +-gcflags 'arg list'
    +	arguments to pass on each go tool compile invocation.
    +-installsuffix suffix
    +	a suffix to use in the name of the package installation directory,
    +	in order to keep output separate from default builds.
    +	If using the -race flag, the install suffix is automatically set to race
    +	or, if set explicitly, has _race appended to it.  Likewise for the -msan
    +	flag.  Using a -buildmode option that requires non-default compile flags
    +	has a similar effect.
    +-ldflags 'flag list'
    +	arguments to pass on each go tool link invocation.
    +-linkshared
    +	link against shared libraries previously created with
    +	-buildmode=shared.
    +-pkgdir dir
    +	install and load all packages from dir instead of the usual locations.
    +	For example, when building with a non-standard configuration,
    +	use -pkgdir to keep generated packages in a separate location.
    +-tags 'tag list'
    +	a list of build tags to consider satisfied during the build.
    +	For more information about build tags, see the description of
    +	build constraints in the documentation for the go/build package.
    +-toolexec 'cmd args'
    +	a program to use to invoke toolchain programs like vet and asm.
    +	For example, instead of running asm, the go command will run
    +	'cmd args /path/to/asm <arguments for asm>'.
    +
    +

    +The list flags accept a space-separated list of strings. To embed spaces +in an element in the list, surround it with either single or double quotes. +

    +

    +For more about specifying packages, see 'go help packages'. +For more about where packages and binaries are installed, +run 'go help gopath'. +For more about calling between Go and C/C++, run 'go help c'. +

    +

    +Note: Build adheres to certain conventions such as those described +by 'go help gopath'. Not all projects can follow these conventions, +however. Installations that have their own conventions or that use +a separate software build system may choose to use lower-level +invocations such as 'go tool compile' and 'go tool link' to avoid +some of the overheads and design decisions of the build tool. +

    +

    +See also: go install, go get, go clean. +

    +

    Remove object files

    +

    +Usage: +

    +
    go clean [-i] [-r] [-n] [-x] [build flags] [packages]
    +
    +

    +Clean removes object files from package source directories. +The go command builds most objects in a temporary directory, +so go clean is mainly concerned with object files left by other +tools or by manual invocations of go build. +

    +

    +Specifically, clean removes the following files from each of the +source directories corresponding to the import paths: +

    +
    _obj/            old object directory, left from Makefiles
    +_test/           old test directory, left from Makefiles
    +_testmain.go     old gotest file, left from Makefiles
    +test.out         old test log, left from Makefiles
    +build.out        old test log, left from Makefiles
    +*.[568ao]        object files, left from Makefiles
    +
    +DIR(.exe)        from go build
    +DIR.test(.exe)   from go test -c
    +MAINFILE(.exe)   from go build MAINFILE.go
    +*.so             from SWIG
    +
    +

    +In the list, DIR represents the final path element of the +directory, and MAINFILE is the base name of any Go source +file in the directory that is not included when building +the package. +

    +

    +The -i flag causes clean to remove the corresponding installed +archive or binary (what 'go install' would create). +

    +

    +The -n flag causes clean to print the remove commands it would execute, +but not run them. +

    +

    +The -r flag causes clean to be applied recursively to all the +dependencies of the packages named by the import paths. +

    +

    +The -x flag causes clean to print remove commands as it executes them. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Show documentation for package or symbol

    +

    +Usage: +

    +
    go doc [-u] [-c] [package|[package.]symbol[.method]]
    +
    +

    +Doc prints the documentation comments associated with the item identified by its +arguments (a package, const, func, type, var, or method) followed by a one-line +summary of each of the first-level items "under" that item (package-level +declarations for a package, methods for a type, etc.). +

    +

    +Doc accepts zero, one, or two arguments. +

    +

    +Given no arguments, that is, when run as +

    +
    go doc
    +
    +

    +it prints the package documentation for the package in the current directory. +If the package is a command (package main), the exported symbols of the package +are elided from the presentation unless the -cmd flag is provided. +

    +

    +When run with one argument, the argument is treated as a Go-syntax-like +representation of the item to be documented. What the argument selects depends +on what is installed in GOROOT and GOPATH, as well as the form of the argument, +which is schematically one of these: +

    +
    go doc <pkg>
    +go doc <sym>[.<method>]
    +go doc [<pkg>.]<sym>[.<method>]
    +go doc [<pkg>.][<sym>.]<method>
    +
    +

    +The first item in this list matched by the argument is the one whose documentation +is printed. (See the examples below.) However, if the argument starts with a capital +letter it is assumed to identify a symbol or method in the current directory. +

    +

    +For packages, the order of scanning is determined lexically in breadth-first order. +That is, the package presented is the one that matches the search and is nearest +the root and lexically first at its level of the hierarchy. The GOROOT tree is +always scanned in its entirety before GOPATH. +

    +

    +If there is no package specified or matched, the package in the current +directory is selected, so "go doc Foo" shows the documentation for symbol Foo in +the current package. +

    +

    +The package path must be either a qualified path or a proper suffix of a +path. The go tool's usual package mechanism does not apply: package path +elements like . and ... are not implemented by go doc. +

    +

    +When run with two arguments, the first must be a full package path (not just a +suffix), and the second is a symbol or symbol and method; this is similar to the +syntax accepted by godoc: +

    +
    go doc <pkg> <sym>[.<method>]
    +
    +

    +In all forms, when matching symbols, lower-case letters in the argument match +either case but upper-case letters match exactly. This means that there may be +multiple matches of a lower-case argument in a package if different symbols have +different cases. If this occurs, documentation for all matches is printed. +

    +

    +Examples: +

    +
    go doc
    +	Show documentation for current package.
    +go doc Foo
    +	Show documentation for Foo in the current package.
    +	(Foo starts with a capital letter so it cannot match
    +	a package path.)
    +go doc encoding/json
    +	Show documentation for the encoding/json package.
    +go doc json
    +	Shorthand for encoding/json.
    +go doc json.Number (or go doc json.number)
    +	Show documentation and method summary for json.Number.
    +go doc json.Number.Int64 (or go doc json.number.int64)
    +	Show documentation for json.Number's Int64 method.
    +go doc cmd/doc
    +	Show package docs for the doc command.
    +go doc -cmd cmd/doc
    +	Show package docs and exported symbols within the doc command.
    +go doc template.new
    +	Show documentation for html/template's New function.
    +	(html/template is lexically before text/template)
    +go doc text/template.new # One argument
    +	Show documentation for text/template's New function.
    +go doc text/template new # Two arguments
    +	Show documentation for text/template's New function.
    +
    +At least in the current tree, these invocations all print the
    +documentation for json.Decoder's Decode method:
    +
    +go doc json.Decoder.Decode
    +go doc json.decoder.decode
    +go doc json.decode
    +cd go/src/encoding/json; go doc decode
    +
    +

    +Flags: +

    +
    -c
    +	Respect case when matching symbols.
    +-cmd
    +	Treat a command (package main) like a regular package.
    +	Otherwise package main's exported symbols are hidden
    +	when showing the package's top-level documentation.
    +-u
    +	Show documentation for unexported as well as exported
    +	symbols and methods.
    +
    +

    Print Go environment information

    +

    +Usage: +

    +
    go env [var ...]
    +
    +

    +Env prints Go environment information. +

    +

    +By default env prints information as a shell script +(on Windows, a batch file). If one or more variable +names is given as arguments, env prints the value of +each named variable on its own line. +

    +

    Start a bug report

    +

    +Usage: +

    +
    go bug
    +
    +

    +Bug opens the default browser and starts a new bug report. +The report includes useful system information. +

    +

    Run go tool fix on packages

    +

    +Usage: +

    +
    go fix [packages]
    +
    +

    +Fix runs the Go fix command on the packages named by the import paths. +

    +

    +For more about fix, see 'go doc cmd/fix'. +For more about specifying packages, see 'go help packages'. +

    +

    +To run fix with specific options, run 'go tool fix'. +

    +

    +See also: go fmt, go vet. +

    +

    Run gofmt on package sources

    +

    +Usage: +

    +
    go fmt [-n] [-x] [packages]
    +
    +

    +Fmt runs the command 'gofmt -l -w' on the packages named +by the import paths. It prints the names of the files that are modified. +

    +

    +For more about gofmt, see 'go doc cmd/gofmt'. +For more about specifying packages, see 'go help packages'. +

    +

    +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +To run gofmt with specific options, run gofmt itself. +

    +

    +See also: go fix, go vet. +

    +

    Generate Go files by processing source

    +

    +Usage: +

    +
    go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]
    +
    +

    +Generate runs commands described by directives within existing +files. Those commands can run any process but the intent is to +create or update Go source files. +

    +

    +Go generate is never run automatically by go build, go get, go test, +and so on. It must be run explicitly. +

    +

    +Go generate scans the file for directives, which are lines of +the form, +

    +
    //go:generate command argument...
    +
    +

    +(note: no leading spaces and no space in "//go") where command +is the generator to be run, corresponding to an executable file +that can be run locally. It must either be in the shell path +(gofmt), a fully qualified path (/usr/you/bin/mytool), or a +command alias, described below. +

    +

    +Note that go generate does not parse the file, so lines that look +like directives in comments or multiline strings will be treated +as directives. +

    +

    +The arguments to the directive are space-separated tokens or +double-quoted strings passed to the generator as individual +arguments when it is run. +

    +

    +Quoted strings use Go syntax and are evaluated before execution; a +quoted string appears as a single argument to the generator. +

    +

    +Go generate sets several variables when it runs the generator: +

    +
    $GOARCH
    +	The execution architecture (arm, amd64, etc.)
    +$GOOS
    +	The execution operating system (linux, windows, etc.)
    +$GOFILE
    +	The base name of the file.
    +$GOLINE
    +	The line number of the directive in the source file.
    +$GOPACKAGE
    +	The name of the package of the file containing the directive.
    +$DOLLAR
    +	A dollar sign.
    +
    +

    +Other than variable substitution and quoted-string evaluation, no +special processing such as "globbing" is performed on the command +line. +

    +

    +As a last step before running the command, any invocations of any +environment variables with alphanumeric names, such as $GOFILE or +$HOME, are expanded throughout the command line. The syntax for +variable expansion is $NAME on all operating systems. Due to the +order of evaluation, variables are expanded even inside quoted +strings. If the variable NAME is not set, $NAME expands to the +empty string. +

    +

    +A directive of the form, +

    +
    //go:generate -command xxx args...
    +
    +

    +specifies, for the remainder of this source file only, that the +string xxx represents the command identified by the arguments. This +can be used to create aliases or to handle multiword generators. +For example, +

    +
    //go:generate -command foo go tool foo
    +
    +

    +specifies that the command "foo" represents the generator +"go tool foo". +
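For illustration only, a minimal sketch of how these directives might appear in a source file; the `shapes` package, the `mytool` generator, and the `codegen` alias are hypothetical names, and `$GOFILE` shows the variable expansion described above:

```go
// Package shapes exists only to illustrate go:generate directives;
// "mytool" is a hypothetical generator assumed to be on the shell path.
package shapes

// Plain directive: runs the generator when "go generate" is invoked on this
// package; $GOFILE expands to the name of this file.
//go:generate mytool -input=$GOFILE -output=shapes_gen.go

// Alias form: for the remainder of this file, "codegen" stands for "mytool -fast".
//go:generate -command codegen mytool -fast
//go:generate codegen -output=shapes_fast_gen.go
```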

    +

    +Generate processes packages in the order given on the command line, +one at a time. If the command line lists .go files, they are treated +as a single package. Within a package, generate processes the +source files in a package in file name order, one at a time. Within +a source file, generate runs generators in the order they appear +in the file, one at a time. +

    +

    +If any generator returns an error exit status, "go generate" skips +all further processing for that package. +

    +

    +The generator is run in the package's source directory. +

    +

    +Go generate accepts one specific flag: +

    +
    -run=""
    +	if non-empty, specifies a regular expression to select
    +	directives whose full original source text (excluding
    +	any trailing spaces and final newline) matches the
    +	expression.
    +
    +

    +It also accepts the standard build flags including -v, -n, and -x. +The -v flag prints the names of packages and files as they are +processed. +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Download and install packages and dependencies

    +

    +Usage: +

    +
    go get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]
    +
    +

    +Get downloads the packages named by the import paths, along with their +dependencies. It then installs the named packages, like 'go install'. +

    +

    +The -d flag instructs get to stop after downloading the packages; that is, +it instructs get not to install the packages. +

    +

    +The -f flag, valid only when -u is set, forces get -u not to verify that +each package has been checked out from the source control repository +implied by its import path. This can be useful if the source is a local fork +of the original. +

    +

    +The -fix flag instructs get to run the fix tool on the downloaded packages +before resolving dependencies or building the code. +

    +

    +The -insecure flag permits fetching from repositories and resolving +custom domains using insecure schemes such as HTTP. Use with caution. +

    +

    +The -t flag instructs get to also download the packages required to build +the tests for the specified packages. +

    +

    +The -u flag instructs get to use the network to update the named packages +and their dependencies. By default, get uses the network to check out +missing packages but does not use it to look for updates to existing packages. +

    +

    +The -v flag enables verbose progress and debug output. +

    +

    +Get also accepts build flags to control the installation. See 'go help build'. +

    +

    +When checking out a new package, get creates the target directory +GOPATH/src/<import-path>. If the GOPATH contains multiple entries, +get uses the first one. For more details see: 'go help gopath'. +

    +

    +When checking out or updating a package, get looks for a branch or tag +that matches the locally installed version of Go. The most important +rule is that if the local installation is running version "go1", get +searches for a branch or tag named "go1". If no such version exists it +retrieves the most recent version of the package. +

    +

    +When go get checks out or updates a Git repository, +it also updates any git submodules referenced by the repository. +

    +

    +Get never checks out or updates code stored in vendor directories. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    +For more about how 'go get' finds source code to +download, see 'go help importpath'. +

    +

    +See also: go build, go install, go clean. +

    +

    Compile and install packages and dependencies

    +

    +Usage: +

    +
    go install [build flags] [packages]
    +
    +

    +Install compiles and installs the packages named by the import paths, +along with their dependencies. +

    +

    +For more about the build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. +

    +

    +See also: go build, go get, go clean. +

    +

    List packages

    +

    +Usage: +

    +
    go list [-e] [-f format] [-json] [build flags] [packages]
    +
    +

    +List lists the packages named by the import paths, one per line. +

    +

    +The default output shows the package import path: +

    +
    bytes
    +encoding/json
    +github.com/gorilla/mux
    +golang.org/x/net/html
    +
    +

    +The -f flag specifies an alternate format for the list, using the +syntax of package template. The default output is equivalent to -f +''. The struct being passed to the template is: +

    +
    type Package struct {
    +    Dir           string // directory containing package sources
    +    ImportPath    string // import path of package in dir
    +    ImportComment string // path in import comment on package statement
    +    Name          string // package name
    +    Doc           string // package documentation string
    +    Target        string // install path
    +    Shlib         string // the shared library that contains this package (only set when -linkshared)
    +    Goroot        bool   // is this package in the Go root?
    +    Standard      bool   // is this package part of the standard Go library?
    +    Stale         bool   // would 'go install' do anything for this package?
    +    StaleReason   string // explanation for Stale==true
    +    Root          string // Go root or Go path dir containing this package
    +    ConflictDir   string // this directory shadows Dir in $GOPATH
    +    BinaryOnly    bool   // binary-only package: cannot be recompiled from sources
    +
    +    // Source files
    +    GoFiles        []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
    +    CgoFiles       []string // .go sources files that import "C"
    +    IgnoredGoFiles []string // .go sources ignored due to build constraints
    +    CFiles         []string // .c source files
    +    CXXFiles       []string // .cc, .cxx and .cpp source files
    +    MFiles         []string // .m source files
    +    HFiles         []string // .h, .hh, .hpp and .hxx source files
    +    FFiles         []string // .f, .F, .for and .f90 Fortran source files
    +    SFiles         []string // .s source files
    +    SwigFiles      []string // .swig files
    +    SwigCXXFiles   []string // .swigcxx files
    +    SysoFiles      []string // .syso object files to add to archive
    +    TestGoFiles    []string // _test.go files in package
    +    XTestGoFiles   []string // _test.go files outside package
    +
    +    // Cgo directives
    +    CgoCFLAGS    []string // cgo: flags for C compiler
    +    CgoCPPFLAGS  []string // cgo: flags for C preprocessor
    +    CgoCXXFLAGS  []string // cgo: flags for C++ compiler
    +    CgoFFLAGS    []string // cgo: flags for Fortran compiler
    +    CgoLDFLAGS   []string // cgo: flags for linker
    +    CgoPkgConfig []string // cgo: pkg-config names
    +
    +    // Dependency information
    +    Imports      []string // import paths used by this package
    +    Deps         []string // all (recursively) imported dependencies
    +    TestImports  []string // imports from TestGoFiles
    +    XTestImports []string // imports from XTestGoFiles
    +
    +    // Error information
    +    Incomplete bool            // this package or a dependency has an error
    +    Error      *PackageError   // error loading package
    +    DepsErrors []*PackageError // errors loading dependencies
    +}
    +
    +

    +Packages stored in vendor directories report an ImportPath that includes the +path to the vendor directory (for example, "d/vendor/p" instead of "p"), +so that the ImportPath uniquely identifies a given copy of a package. +The Imports, Deps, TestImports, and XTestImports lists also contain these +expanded imports paths. See golang.org/s/go15vendor for more about vendoring. +

    +

    +The error information, if any, is +

    +
    type PackageError struct {
    +    ImportStack   []string // shortest path from package named on command line to this one
    +    Pos           string   // position of error (if present, file:line:col)
    +    Err           string   // the error itself
    +}
    +
    +

    +The template function "join" calls strings.Join. +

    +

    +The template function "context" returns the build context, defined as: +

    +
    type Context struct {
    +	GOARCH        string   // target architecture
    +	GOOS          string   // target operating system
    +	GOROOT        string   // Go root
    +	GOPATH        string   // Go path
    +	CgoEnabled    bool     // whether cgo can be used
    +	UseAllFiles   bool     // use files regardless of +build lines, file names
    +	Compiler      string   // compiler to assume when computing target paths
    +	BuildTags     []string // build constraints to match in +build lines
    +	ReleaseTags   []string // releases the current release is compatible with
    +	InstallSuffix string   // suffix to use in the name of the install dir
    +}
    +
    +

    +For more information about the meaning of these fields see the documentation +for the go/build package's Context type. +
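As a small aside (not part of the go tool's output), the same context can be read programmatically from the standard go/build package; a minimal sketch:

```go
// build.Default is the Context the go tool would use, populated from the
// environment (GOARCH, GOOS, GOPATH, CGO_ENABLED, ...).
package main

import (
	"fmt"
	"go/build"
)

func main() {
	ctx := build.Default
	fmt.Println("GOARCH:     ", ctx.GOARCH)
	fmt.Println("GOOS:       ", ctx.GOOS)
	fmt.Println("GOPATH:     ", ctx.GOPATH)
	fmt.Println("CgoEnabled: ", ctx.CgoEnabled)
	fmt.Println("ReleaseTags:", ctx.ReleaseTags)
}
```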

    +

    +The -json flag causes the package data to be printed in JSON format +instead of using the template format. +

    +

    +The -e flag changes the handling of erroneous packages, those that +cannot be found or are malformed. By default, the list command +prints an error to standard error for each erroneous package and +omits the packages from consideration during the usual printing. +With the -e flag, the list command never prints errors to standard +error and instead processes the erroneous packages with the usual +printing. Erroneous packages will have a non-empty ImportPath and +a non-nil Error field; other information may or may not be missing +(zeroed). +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Compile and run Go program

    +

    +Usage: +

    +
    go run [build flags] [-exec xprog] gofiles... [arguments...]
    +
    +

    +Run compiles and runs the main package comprising the named Go source files. +A Go source file is defined to be a file ending in a literal ".go" suffix. +

    +

    +By default, 'go run' runs the compiled binary directly: 'a.out arguments...'. +If the -exec flag is given, 'go run' invokes the binary using xprog: +

    +
    'xprog a.out arguments...'.
    +
    +

    +If the -exec flag is not given, GOOS or GOARCH is different from the system +default, and a program named go_$GOOS_$GOARCH_exec can be found +on the current search path, 'go run' invokes the binary using that program, +for example 'go_nacl_386_exec a.out arguments...'. This allows execution of +cross-compiled programs when a simulator or other execution method is +available. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +See also: go build. +

    +

    Test packages

    +

    +Usage: +

    +
    go test [build/test flags] [packages] [build/test flags & test binary flags]
    +
    +

    +'Go test' automates testing the packages named by the import paths. +It prints a summary of the test results in the format: +

    +
    ok   archive/tar   0.011s
    +FAIL archive/zip   0.022s
    +ok   compress/gzip 0.033s
    +...
    +
    +

    +followed by detailed output for each failed package. +

    +

    +'Go test' recompiles each package along with any files with names matching +the file pattern "*_test.go". +Files whose names begin with "_" (including "_test.go") or "." are ignored. +These additional files can contain test functions, benchmark functions, and +example functions. See 'go help testfunc' for more. +Each listed package causes the execution of a separate test binary. +

    +

    +Test files that declare a package with the suffix "_test" will be compiled as a +separate package, and then linked and run with the main test binary. +
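A minimal sketch of such an external test package; the mathutil package, its Add function, and the import path are hypothetical:

```go
// File mathutil_test.go: the "_test" suffix on the package name makes this an
// external test package, compiled separately and linked into the test binary.
package mathutil_test

import (
	"testing"

	"example.com/mathutil" // hypothetical import path of the package under test
)

func TestAdd(t *testing.T) {
	if got := mathutil.Add(1, 2); got != 3 {
		t.Fatalf("Add(1, 2) = %d, want 3", got)
	}
}
```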

    +

    +The go tool will ignore a directory named "testdata", making it available +to hold ancillary data needed by the tests. +

    +

    +By default, go test needs no arguments. It compiles and tests the package +with source in the current directory, including tests, and runs the tests. +

    +

    +The package is built in a temporary directory so it does not interfere with the +non-test installation. +

    +

    +In addition to the build flags, the flags handled by 'go test' itself are: +

    +
    -args
    +    Pass the remainder of the command line (everything after -args)
    +    to the test binary, uninterpreted and unchanged.
    +    Because this flag consumes the remainder of the command line,
    +    the package list (if present) must appear before this flag.
    +
    +-c
    +    Compile the test binary to pkg.test but do not run it
    +    (where pkg is the last element of the package's import path).
    +    The file name can be changed with the -o flag.
    +
    +-exec xprog
    +    Run the test binary using xprog. The behavior is the same as
    +    in 'go run'. See 'go help run' for details.
    +
    +-i
    +    Install packages that are dependencies of the test.
    +    Do not run the test.
    +
    +-o file
    +    Compile the test binary to the named file.
    +    The test still runs (unless -c or -i is specified).
    +
    +

    +The test binary also accepts flags that control execution of the test; these +flags are also accessible by 'go test'. See 'go help testflag' for details. +

    +

    +For more about build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. +

    +

    +See also: go build, go vet. +

    +

    Run specified go tool

    +

    +Usage: +

    +
    go tool [-n] command [args...]
    +
    +

    +Tool runs the go tool command identified by the arguments. +With no arguments it prints the list of known tools. +

    +

    +The -n flag causes tool to print the command that would be +executed but not execute it. +

    +

    +For more about each tool command, see 'go tool command -h'. +

    +

    Print Go version

    +

    +Usage: +

    +
    go version
    +
    +

    +Version prints the Go version, as reported by runtime.Version. +

    +

    Run go tool vet on packages

    +

    +Usage: +

    +
    go vet [-n] [-x] [build flags] [packages]
    +
    +

    +Vet runs the Go vet command on the packages named by the import paths. +

    +

    +For more about vet, see 'go doc cmd/vet'. +For more about specifying packages, see 'go help packages'. +

    +

    +To run the vet tool with specific options, run 'go tool vet'. +

    +

    +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +See also: go fmt, go fix. +

    +

    Calling between Go and C

    +

    +There are two different ways to call between Go and C/C++ code. +

    +

    +The first is the cgo tool, which is part of the Go distribution. For +information on how to use it see the cgo documentation (go doc cmd/cgo). +

    +

    +The second is the SWIG program, which is a general tool for +interfacing between languages. For information on SWIG see +http://swig.org/. When running go build, any file with a .swig +extension will be passed to SWIG. Any file with a .swigcxx extension +will be passed to SWIG with the -c++ option. +

    +

    +When either cgo or SWIG is used, go build will pass any .c, .m, .s, +or .S files to the C compiler, and any .cc, .cpp, .cxx files to the C++ +compiler. The CC or CXX environment variables may be set to determine +the C or C++ compiler, respectively, to use. +

    +

    Description of build modes

    +

    +The 'go build' and 'go install' commands take a -buildmode argument which +indicates which kind of object file is to be built. Currently supported values +are: +

    +
    -buildmode=archive
    +	Build the listed non-main packages into .a files. Packages named
    +	main are ignored.
    +
    +-buildmode=c-archive
    +	Build the listed main package, plus all packages it imports,
    +	into a C archive file. The only callable symbols will be those
    +	functions exported using a cgo //export comment. Requires
    +	exactly one main package to be listed.
    +
    +-buildmode=c-shared
    +	Build the listed main packages, plus all packages that they
    +	import, into C shared libraries. The only callable symbols will
    +	be those functions exported using a cgo //export comment.
    +	Non-main packages are ignored.
    +
    +-buildmode=default
    +	Listed main packages are built into executables and listed
    +	non-main packages are built into .a files (the default
    +	behavior).
    +
    +-buildmode=shared
    +	Combine all the listed non-main packages into a single shared
    +	library that will be used when building with the -linkshared
    +	option. Packages named main are ignored.
    +
    +-buildmode=exe
    +	Build the listed main packages and everything they import into
    +	executables. Packages not named main are ignored.
    +
    +-buildmode=pie
    +	Build the listed main packages and everything they import into
    +	position independent executables (PIE). Packages not named
    +	main are ignored.
    +
    +-buildmode=plugin
    +	Build the listed main packages, plus all packages that they
    +	import, into a Go plugin. Packages not named main are ignored.
    +
    +
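As a sketch of the c-shared/c-archive modes listed above: a main package that exports one function to C via a cgo //export comment. The Add function and the output file name are illustrative only.

```
package main

import "C" // needed so the //export directive below takes effect

//export Add
func Add(a, b C.int) C.int {
	return a + b
}

// main is required for a main package but is not run when the result
// is built with, e.g., go build -buildmode=c-shared -o libadd.so
func main() {}
```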

    File types

    +

    +The go command examines the contents of a restricted set of files +in each directory. It identifies which files to examine based on +the extension of the file name. These extensions are: +

    +
    .go
    +	Go source files.
    +.c, .h
    +	C source files.
    +	If the package uses cgo or SWIG, these will be compiled with the
    +	OS-native compiler (typically gcc); otherwise they will
    +	trigger an error.
    +.cc, .cpp, .cxx, .hh, .hpp, .hxx
    +	C++ source files. Only useful with cgo or SWIG, and always
    +	compiled with the OS-native compiler.
    +.m
    +	Objective-C source files. Only useful with cgo, and always
    +	compiled with the OS-native compiler.
    +.s, .S
    +	Assembler source files.
    +	If the package uses cgo or SWIG, these will be assembled with the
    +	OS-native assembler (typically gcc (sic)); otherwise they
    +	will be assembled with the Go assembler.
    +.swig, .swigcxx
    +	SWIG definition files.
    +.syso
    +	System object files.
    +
    +

    +Files of each of these types except .syso may contain build +constraints, but the go command stops scanning for build constraints +at the first item in the file that is not a blank line or //-style +line comment. See the go/build package documentation for +more details. +
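A sketch of such a build constraint, using the "// +build" line-comment form this text refers to; the package name mypkg is hypothetical:

```
// The constraint below restricts the file to (linux and amd64) or
// (darwin and amd64). It must appear before the package clause and be
// followed by a blank line.

// +build linux,amd64 darwin,amd64

package mypkg
```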

    +

    +Non-test Go source files can also include a //go:binary-only-package +comment, indicating that the package sources are included +for documentation only and must not be used to build the +package binary. This enables distribution of Go packages in +their compiled form alone. See the go/build package documentation +for more details. +

    +

    GOPATH environment variable

    +

    +The Go path is used to resolve import statements. +It is implemented by and documented in the go/build package. +

    +

    +The GOPATH environment variable lists places to look for Go code. +On Unix, the value is a colon-separated string. +On Windows, the value is a semicolon-separated string. +On Plan 9, the value is a list. +

    +

    +If the environment variable is unset, GOPATH defaults +to a subdirectory named "go" in the user's home directory +($HOME/go on Unix, %USERPROFILE%\go on Windows), +unless that directory holds a Go distribution. +Run "go env GOPATH" to see the current GOPATH. +

    +

    +See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH. +

    +

    +Each directory listed in GOPATH must have a prescribed structure: +

    +

    +The src directory holds source code. The path below src +determines the import path or executable name. +

    +

    +The pkg directory holds installed package objects. +As in the Go tree, each target operating system and +architecture pair has its own subdirectory of pkg +(pkg/GOOS_GOARCH). +

    +

    +If DIR is a directory listed in the GOPATH, a package with +source in DIR/src/foo/bar can be imported as "foo/bar" and +has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a". +

    +

    +The bin directory holds compiled commands. +Each command is named for its source directory, but only +the final element, not the entire path. That is, the +command with source in DIR/src/foo/quux is installed into +DIR/bin/quux, not DIR/bin/foo/quux. The "foo/" prefix is stripped +so that you can add DIR/bin to your PATH to get at the +installed commands. If the GOBIN environment variable is +set, commands are installed to the directory it names instead +of DIR/bin. GOBIN must be an absolute path. +

    +

    +Here's an example directory layout: +

    +
    GOPATH=/home/user/go
    +
    +/home/user/go/
    +    src/
    +        foo/
    +            bar/               (go code in package bar)
    +                x.go
    +            quux/              (go code in package main)
    +                y.go
    +    bin/
    +        quux                   (installed command)
    +    pkg/
    +        linux_amd64/
    +            foo/
    +                bar.a          (installed package object)
    +
    +

    +Go searches each directory listed in GOPATH to find source code, +but new packages are always downloaded into the first directory +in the list. +

    +

    +See https://golang.org/doc/code.html for an example. +
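For inspecting the effective GOPATH from Go code rather than via 'go env GOPATH', the go/build package should report the same value, including the per-user default when the variable is unset; a minimal sketch:

```
package main

import (
	"fmt"
	"go/build"
)

func main() {
	// build.Default.GOPATH is the GOPATH environment variable if set,
	// otherwise the per-user default described above.
	fmt.Println(build.Default.GOPATH)
}
```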

    +

    Internal Directories

    +

    +Code in or below a directory named "internal" is importable only +by code in the directory tree rooted at the parent of "internal". +Here's an extended version of the directory layout above: +

    +
    /home/user/go/
    +    src/
    +        crash/
    +            bang/              (go code in package bang)
    +                b.go
    +        foo/                   (go code in package foo)
    +            f.go
    +            bar/               (go code in package bar)
    +                x.go
    +            internal/
    +                baz/           (go code in package baz)
    +                    z.go
    +            quux/              (go code in package main)
    +                y.go
    +
    +

    +The code in z.go is imported as "foo/internal/baz", but that +import statement can only appear in source files in the subtree +rooted at foo. The source files foo/f.go, foo/bar/x.go, and +foo/quux/y.go can all import "foo/internal/baz", but the source file +crash/bang/b.go cannot. +
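A sketch of what the allowed import looks like from inside the foo subtree; the DoSomething call is hypothetical and stands in for whatever package baz exports:

```
// foo/f.go
package foo

import "foo/internal/baz" // legal here; crash/bang/b.go could not do this

func UseBaz() {
	baz.DoSomething() // hypothetical exported function of package baz
}
```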

    +

    +See https://golang.org/s/go14internal for details. +

    +

    Vendor Directories

    +

    +Go 1.6 includes support for using local copies of external dependencies +to satisfy imports of those dependencies, often referred to as vendoring. +

    +

    +Code below a directory named "vendor" is importable only +by code in the directory tree rooted at the parent of "vendor", +and only using an import path that omits the prefix up to and +including the vendor element. +

    +

    +Here's the example from the previous section, +but with the "internal" directory renamed to "vendor" +and a new foo/vendor/crash/bang directory added: +

    +
    /home/user/go/
    +    src/
    +        crash/
    +            bang/              (go code in package bang)
    +                b.go
    +        foo/                   (go code in package foo)
    +            f.go
    +            bar/               (go code in package bar)
    +                x.go
    +            vendor/
    +                crash/
    +                    bang/      (go code in package bang)
    +                        b.go
    +                baz/           (go code in package baz)
    +                    z.go
    +            quux/              (go code in package main)
    +                y.go
    +
    +

    +The same visibility rules apply as for internal, but the code +in z.go is imported as "baz", not as "foo/vendor/baz". +

    +

    +Code in vendor directories deeper in the source tree shadows +code in higher directories. Within the subtree rooted at foo, an import +of "crash/bang" resolves to "foo/vendor/crash/bang", not the +top-level "crash/bang". +

    +

    +Code in vendor directories is not subject to import path +checking (see 'go help importpath'). +

    +

    +When 'go get' checks out or updates a git repository, it now also +updates submodules. +

    +

    +Vendor directories do not affect the placement of new repositories +being checked out for the first time by 'go get': those are always +placed in the main GOPATH, never in a vendor subtree. +

    +

    +See https://golang.org/s/go15vendor for details. +

    +

    Environment variables

    +

    +The go command, and the tools it invokes, examine a few different +environment variables. For many of these, you can see the default +value on your system by running 'go env NAME', where NAME is the +name of the variable. +

    +

    +General-purpose environment variables: +

    +
    GCCGO
    +	The gccgo command to run for 'go build -compiler=gccgo'.
    +GOARCH
    +	The architecture, or processor, for which to compile code.
    +	Examples are amd64, 386, arm, ppc64.
    +GOBIN
    +	The directory where 'go install' will install a command.
    +GOOS
    +	The operating system for which to compile code.
    +	Examples are linux, darwin, windows, netbsd.
    +GOPATH
    +	For more details see: 'go help gopath'.
    +GORACE
    +	Options for the race detector.
    +	See https://golang.org/doc/articles/race_detector.html.
    +GOROOT
    +	The root of the go tree.
    +
    +
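For the compile-time counterparts of GOOS, GOARCH and GOROOT, a running program can consult the runtime package, as in this sketch:

```
package main

import (
	"fmt"
	"runtime"
)

func main() {
	fmt.Println(runtime.GOOS, runtime.GOARCH) // e.g. "linux amd64"
	fmt.Println(runtime.GOROOT())             // root of the Go tree in use
	fmt.Println(runtime.Version())            // Go release, e.g. "go1.8"
}
```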

    +Environment variables for use with cgo: +

    +
    CC
    +	The command to use to compile C code.
    +CGO_ENABLED
    +	Whether the cgo command is supported.  Either 0 or 1.
    +CGO_CFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C code.
    +CGO_CPPFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C or C++ code.
    +CGO_CXXFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C++ code.
    +CGO_FFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	Fortran code.
    +CGO_LDFLAGS
    +	Flags that cgo will pass to the compiler when linking.
    +CXX
    +	The command to use to compile C++ code.
    +PKG_CONFIG
    +	Path to pkg-config tool.
    +
    +

    +Architecture-specific environment variables: +

    +
    GOARM
    +	For GOARCH=arm, the ARM architecture for which to compile.
    +	Valid values are 5, 6, 7.
    +GO386
    +	For GOARCH=386, the floating point instruction set.
    +	Valid values are 387, sse2.
    +
    +

    +Special-purpose environment variables: +

    +
    GOROOT_FINAL
    +	The root of the installed Go tree, when it is
    +	installed in a location other than where it is built.
    +	File names in stack traces are rewritten from GOROOT to
    +	GOROOT_FINAL.
    +GO_EXTLINK_ENABLED
    +	Whether the linker should use external linking mode
    +	when using -linkmode=auto with code that uses cgo.
    +	Set to 0 to disable external linking mode, 1 to enable it.
    +GIT_ALLOW_PROTOCOL
    +	Defined by Git. A colon-separated list of schemes that are allowed to be used
    +	with git fetch/clone. If set, any scheme not explicitly mentioned will be
    +	considered insecure by 'go get'.
    +
    +

    Import path syntax

    +

    +An import path (see 'go help packages') denotes a package stored in the local +file system. In general, an import path denotes either a standard package (such +as "unicode/utf8") or a package found in one of the work spaces (For more +details see: 'go help gopath'). +

    +

    Relative import paths

    +

    +An import path beginning with ./ or ../ is called a relative path. +The toolchain supports relative import paths as a shortcut in two ways. +

    +

    +First, a relative path can be used as a shorthand on the command line. +If you are working in the directory containing the code imported as +"unicode" and want to run the tests for "unicode/utf8", you can type +"go test ./utf8" instead of needing to specify the full path. +Similarly, in the reverse situation, "go test .." will test "unicode" from +the "unicode/utf8" directory. Relative patterns are also allowed, like +"go test ./..." to test all subdirectories. See 'go help packages' for details +on the pattern syntax. +

    +

    +Second, if you are compiling a Go program not in a work space, +you can use a relative path in an import statement in that program +to refer to nearby code also not in a work space. +This makes it easy to experiment with small multipackage programs +outside of the usual work spaces, but such programs cannot be +installed with "go install" (there is no work space in which to install them), +so they are rebuilt from scratch each time they are built. +To avoid ambiguity, Go programs cannot use relative import paths +within a work space. +

    +

    Remote import paths

    +

    +Certain import paths also +describe how to obtain the source code for the package using +a revision control system. +

    +

    +A few common code hosting sites have special syntax: +

    +
    Bitbucket (Git, Mercurial)
    +
    +	import "bitbucket.org/user/project"
    +	import "bitbucket.org/user/project/sub/directory"
    +
    +GitHub (Git)
    +
    +	import "github.com/user/project"
    +	import "github.com/user/project/sub/directory"
    +
    +Launchpad (Bazaar)
    +
    +	import "launchpad.net/project"
    +	import "launchpad.net/project/series"
    +	import "launchpad.net/project/series/sub/directory"
    +
    +	import "launchpad.net/~user/project/branch"
    +	import "launchpad.net/~user/project/branch/sub/directory"
    +
    +IBM DevOps Services (Git)
    +
    +	import "hub.jazz.net/git/user/project"
    +	import "hub.jazz.net/git/user/project/sub/directory"
    +
    +

    +For code hosted on other servers, import paths may either be qualified +with the version control type, or the go tool can dynamically fetch +the import path over https/http and discover where the code resides +from a <meta> tag in the HTML. +

    +

    +To declare the code location, an import path of the form +

    +
    repository.vcs/path
    +
    +

    +specifies the given repository, with or without the .vcs suffix, +using the named version control system, and then the path inside +that repository. The supported version control systems are: +

    +
    Bazaar      .bzr
    +Git         .git
    +Mercurial   .hg
    +Subversion  .svn
    +
    +

    +For example, +

    +
    import "example.org/user/foo.hg"
    +
    +

    +denotes the root directory of the Mercurial repository at +example.org/user/foo or foo.hg, and +

    +
    import "example.org/repo.git/foo/bar"
    +
    +

    +denotes the foo/bar directory of the Git repository at +example.org/repo or repo.git. +

    +

    +When a version control system supports multiple protocols, +each is tried in turn when downloading. For example, a Git +download tries https://, then git+ssh://. +

    +

    +By default, downloads are restricted to known secure protocols +(e.g. https, ssh). To override this setting for Git downloads, the +GIT_ALLOW_PROTOCOL environment variable can be set (For more details see: +'go help environment'). +

    +

    +If the import path is not a known code hosting site and also lacks a +version control qualifier, the go tool attempts to fetch the import +over https/http and looks for a <meta> tag in the document's HTML +<head>. +

    +

    +The meta tag has the form: +

    +
    <meta name="go-import" content="import-prefix vcs repo-root">
    +
    +

    +The import-prefix is the import path corresponding to the repository +root. It must be a prefix or an exact match of the package being +fetched with "go get". If it's not an exact match, another http +request is made at the prefix to verify the <meta> tags match. +

    +

    +The meta tag should appear as early in the file as possible. +In particular, it should appear before any raw JavaScript or CSS, +to avoid confusing the go command's restricted parser. +

    +

    +The vcs is one of "git", "hg", "svn", etc. +

    +

    +The repo-root is the root of the version control system +containing a scheme and not containing a .vcs qualifier. +

    +

    +For example, +

    +
    import "example.org/pkg/foo"
    +
    +

    +will result in the following requests: +

    +
    https://example.org/pkg/foo?go-get=1 (preferred)
    +http://example.org/pkg/foo?go-get=1  (fallback, only with -insecure)
    +
    +

    +If that page contains the meta tag +

    +
    <meta name="go-import" content="example.org git https://code.org/r/p/exproj">
    +
    +

    +the go tool will verify that https://example.org/?go-get=1 contains the +same meta tag and then git clone https://code.org/r/p/exproj into +GOPATH/src/example.org. +
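A sketch of the server side of that exchange, answering ?go-get=1 probes with the meta tag from the example; example.org and code.org are the hypothetical hosts used above, and a real deployment would serve this over HTTPS:

```
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if r.FormValue("go-get") != "1" {
			http.NotFound(w, r)
			return
		}
		fmt.Fprint(w, `<html><head>`+
			`<meta name="go-import" content="example.org git https://code.org/r/p/exproj">`+
			`</head></html>`)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```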

    +

    +New downloaded packages are written to the first directory listed in the GOPATH +environment variable (For more details see: 'go help gopath'). +

    +

    +The go command attempts to download the version of the +package appropriate for the Go release being used. +Run 'go help get' for more. +

    +

    Import path checking

    +

    +When the custom import path feature described above redirects to a +known code hosting site, each of the resulting packages has two possible +import paths, using the custom domain or the known hosting site. +

    +

    +A package statement is said to have an "import comment" if it is immediately +followed (before the next newline) by a comment of one of these two forms: +

    +
    package math // import "path"
    +package math /* import "path" */
    +
    +

    +The go command will refuse to install a package with an import comment +unless it is being referred to by that import path. In this way, import comments +let package authors make sure the custom import path is used and not a +direct path to the underlying code hosting site. +

    +

    +Import path checking is disabled for code found within vendor trees. +This makes it possible to copy code into alternate locations in vendor trees +without needing to update import comments. +

    +

    +See https://golang.org/s/go14customimport for details. +

    +

    Description of package lists

    +

    +Many commands apply to a set of packages: +

    +
    go action [packages]
    +
    +

    +Usually, [packages] is a list of import paths. +

    +

    +An import path that is a rooted path or that begins with +a . or .. element is interpreted as a file system path and +denotes the package in that directory. +

    +

    +Otherwise, the import path P denotes the package found in +the directory DIR/src/P for some DIR listed in the GOPATH +environment variable (For more details see: 'go help gopath'). +

    +

    +If no import paths are given, the action applies to the +package in the current directory. +

    +

    +There are four reserved names for paths that should not be used +for packages to be built with the go tool: +

    +

    +- "main" denotes the top-level package in a stand-alone executable. +

    +

    +- "all" expands to all package directories found in all the GOPATH +trees. For example, 'go list all' lists all the packages on the local +system. +

    +

    +- "std" is like all but expands to just the packages in the standard +Go library. +

    +

    +- "cmd" expands to the Go repository's commands and their +internal libraries. +

    +

    +Import paths beginning with "cmd/" only match source code in +the Go repository. +

    +

    +An import path is a pattern if it includes one or more "..." wildcards, +each of which can match any string, including the empty string and +strings containing slashes. Such a pattern expands to all package +directories found in the GOPATH trees with names matching the +patterns. As a special case, x/... matches x as well as x's subdirectories. +For example, net/... expands to net and packages in its subdirectories. +

    +

    +An import path can also name a package to be downloaded from +a remote repository. Run 'go help importpath' for details. +

    +

    +Every package in a program must have a unique import path. +By convention, this is arranged by starting each path with a +unique prefix that belongs to you. For example, paths used +internally at Google all begin with 'google', and paths +denoting remote repositories begin with the path to the code, +such as 'github.com/user/repo'. +

    +

    +Packages in a program need not have unique package names, +but there are two reserved package names with special meaning. +The name main indicates a command, not a library. +Commands are built into binaries and cannot be imported. +The name documentation indicates documentation for +a non-Go program in the directory. Files in package documentation +are ignored by the go command. +

    +

    +As a special case, if the package list is a list of .go files from a +single directory, the command is applied to a single synthesized +package made up of exactly those files, ignoring any build constraints +in those files and ignoring any other files in the directory. +

    +

    +Directory and file names that begin with "." or "_" are ignored +by the go tool, as are directories named "testdata". +

    +

    Description of testing flags

    +

    +The 'go test' command takes both flags that apply to 'go test' itself +and flags that apply to the resulting test binary. +

    +

    +Several of the flags control profiling and write an execution profile +suitable for "go tool pprof"; run "go tool pprof -h" for more +information. The --alloc_space, --alloc_objects, and --show_bytes +options of pprof control how the information is presented. +

    +

    +The following flags are recognized by the 'go test' command and +control the execution of any test: +

    +
    -bench regexp
    +    Run (sub)benchmarks matching a regular expression.
    +    The given regular expression is split into smaller ones by
    +    top-level '/', where each must match the corresponding part of a
    +    benchmark's identifier.
    +    By default, no benchmarks run. To run all benchmarks,
    +    use '-bench .' or '-bench=.'.
    +
    +-benchtime t
    +    Run enough iterations of each benchmark to take t, specified
    +    as a time.Duration (for example, -benchtime 1h30s).
    +    The default is 1 second (1s).
    +
    +-count n
    +    Run each test and benchmark n times (default 1).
    +    If -cpu is set, run n times for each GOMAXPROCS value.
    +    Examples are always run once.
    +
    +-cover
    +    Enable coverage analysis.
    +
    +-covermode set,count,atomic
    +    Set the mode for coverage analysis for the package[s]
    +    being tested. The default is "set" unless -race is enabled,
    +    in which case it is "atomic".
    +    The values:
    +	set: bool: does this statement run?
    +	count: int: how many times does this statement run?
    +	atomic: int: count, but correct in multithreaded tests;
    +		significantly more expensive.
    +    Sets -cover.
    +
    +-coverpkg pkg1,pkg2,pkg3
    +    Apply coverage analysis in each test to the given list of packages.
    +    The default is for each test to analyze only the package being tested.
    +    Packages are specified as import paths.
    +    Sets -cover.
    +
    +-cpu 1,2,4
    +    Specify a list of GOMAXPROCS values for which the tests or
    +    benchmarks should be executed.  The default is the current value
    +    of GOMAXPROCS.
    +
    +-parallel n
    +    Allow parallel execution of test functions that call t.Parallel.
    +    The value of this flag is the maximum number of tests to run
    +    simultaneously; by default, it is set to the value of GOMAXPROCS.
    +    Note that -parallel only applies within a single test binary.
    +    The 'go test' command may run tests for different packages
    +    in parallel as well, according to the setting of the -p flag
    +    (see 'go help build').
    +
    +-run regexp
    +    Run only those tests and examples matching the regular expression.
    +    For tests the regular expression is split into smaller ones by
    +    top-level '/', where each must match the corresponding part of a
    +    test's identifier.
    +
    +-short
    +    Tell long-running tests to shorten their run time.
    +    It is off by default but set during all.bash so that installing
    +    the Go tree can run a sanity check but not spend time running
    +    exhaustive tests.
    +
    +-timeout t
    +    If a test runs longer than t, panic.
    +    The default is 10 minutes (10m).
    +
    +-v
    +    Verbose output: log all tests as they are run. Also print all
    +    text from Log and Logf calls even if the test succeeds.
    +
    +
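The '/'-separated regular expressions for -run and -bench, and the -parallel flag, apply to subtests created with t.Run, roughly as in this sketch (the package name and the Parse logic are hypothetical):

```
package parser_test // hypothetical

import "testing"

// Individual cases can be selected with, e.g., -run 'TestParse/empty'.
func TestParse(t *testing.T) {
	cases := []struct {
		name  string
		in    string
		valid bool
	}{
		{"empty", "", false},
		{"simple", "a+b", true},
	}
	for _, tc := range cases {
		tc := tc // capture for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()          // limited by the -parallel flag
			got := len(tc.in) > 0 // stand-in for a real Parse call
			if got != tc.valid {
				t.Errorf("parse(%q) valid = %v; want %v", tc.in, got, tc.valid)
			}
		})
	}
}
```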

    +The following flags are also recognized by 'go test' and can be used to +profile the tests during execution: +

    +
    -benchmem
    +    Print memory allocation statistics for benchmarks.
    +
    +-blockprofile block.out
    +    Write a goroutine blocking profile to the specified file
    +    when all tests are complete.
    +    Writes test binary as -c would.
    +
    +-blockprofilerate n
    +    Control the detail provided in goroutine blocking profiles by
    +    calling runtime.SetBlockProfileRate with n.
    +    See 'go doc runtime.SetBlockProfileRate'.
    +    The profiler aims to sample, on average, one blocking event every
    +    n nanoseconds the program spends blocked.  By default,
    +    if -test.blockprofile is set without this flag, all blocking events
    +    are recorded, equivalent to -test.blockprofilerate=1.
    +
    +-coverprofile cover.out
    +    Write a coverage profile to the file after all tests have passed.
    +    Sets -cover.
    +
    +-cpuprofile cpu.out
    +    Write a CPU profile to the specified file before exiting.
    +    Writes test binary as -c would.
    +
    +-memprofile mem.out
    +    Write a memory profile to the file after all tests have passed.
    +    Writes test binary as -c would.
    +
    +-memprofilerate n
    +    Enable more precise (and expensive) memory profiles by setting
    +    runtime.MemProfileRate.  See 'go doc runtime.MemProfileRate'.
    +    To profile all memory allocations, use -test.memprofilerate=1
    +    and pass --alloc_space flag to the pprof tool.
    +
    +-mutexprofile mutex.out
    +    Write a mutex contention profile to the specified file
    +    when all tests are complete.
    +    Writes test binary as -c would.
    +
    +-mutexprofilefraction n
    +    Sample 1 in n stack traces of goroutines holding a
    +    contended mutex.
    +
    +-outputdir directory
    +    Place output files from profiling in the specified directory,
    +    by default the directory in which "go test" is running.
    +
    +-trace trace.out
    +    Write an execution trace to the specified file before exiting.
    +
    +

    +Each of these flags is also recognized with an optional 'test.' prefix, +as in -test.v. When invoking the generated test binary (the result of +'go test -c') directly, however, the prefix is mandatory. +

    +

    +The 'go test' command rewrites or removes recognized flags, +as appropriate, both before and after the optional package list, +before invoking the test binary. +

    +

    +For instance, the command +

    +
    go test -v -myflag testdata -cpuprofile=prof.out -x
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out
    +
    +

    +(The -x flag is removed because it applies only to the go command's +execution, not to the test itself.) +

    +

    +The test flags that generate profiles (other than for coverage) also +leave the test binary in pkg.test for use when analyzing the profiles. +

    +

    +When 'go test' runs a test binary, it does so from within the +corresponding package's source code directory. Depending on the test, +it may be necessary to do the same when invoking a generated test +binary directly. +

    +

    +The command-line package list, if present, must appear before any +flag not known to the go test command. Continuing the example above, +the package list would have to appear before -myflag, but could appear +on either side of -v. +

    +

    +To keep an argument for a test binary from being interpreted as a +known flag or a package name, use -args (see 'go help test') which +passes the remainder of the command line through to the test binary +uninterpreted and unaltered. +

    +

    +For instance, the command +

    +
    go test -v -args -x -v
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test -test.v -x -v
    +
    +

    +Similarly, +

    +
    go test -args math
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test math
    +
    +

    +In the first example, the -x and the second -v are passed through to the +test binary unchanged and with no effect on the go command itself. +In the second example, the argument math is passed through to the test +binary, instead of being interpreted as the package list. +

    +

    Description of testing functions

    +

    +The 'go test' command expects to find test, benchmark, and example functions +in the "*_test.go" files corresponding to the package under test. +

    +

    +A test function is one named TestXXX (where XXX is any alphanumeric string +not starting with a lower case letter) and should have the signature, +

    +
    func TestXXX(t *testing.T) { ... }
    +
    +

    +A benchmark function is one named BenchmarkXXX and should have the signature, +

    +
    func BenchmarkXXX(b *testing.B) { ... }
    +
    +
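A runnable benchmark sketch following that signature; the measured work must sit inside the b.N loop, and it can be run with something like 'go test -bench=Sprintf -benchmem'. The package name is hypothetical.

```
package mypkg_test // hypothetical

import (
	"fmt"
	"testing"
)

func BenchmarkSprintf(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = fmt.Sprintf("%d", i) // the framework chooses b.N for stable timing
	}
}
```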

    +An example function is similar to a test function but, instead of using +*testing.T to report success or failure, prints output to os.Stdout. +If the last comment in the function starts with "Output:" then the output +is compared exactly against the comment (see examples below). If the last +comment begins with "Unordered output:" then the output is compared to the +comment, however the order of the lines is ignored. An example with no such +comment is compiled but not executed. An example with no text after +"Output:" is compiled, executed, and expected to produce no output. +

    +

    +Godoc displays the body of ExampleXXX to demonstrate the use +of the function, constant, or variable XXX. An example of a method M with +receiver type T or *T is named ExampleT_M. There may be multiple examples +for a given function, constant, or variable, distinguished by a trailing _xxx, +where xxx is a suffix not beginning with an upper case letter. +

    +

    +Here is an example of an example: +

    +
    func ExamplePrintln() {
    +	Println("The output of\nthis example.")
    +	// Output: The output of
    +	// this example.
    +}
    +
    +

    +Here is another example where the ordering of the output is ignored: +

    +
    func ExamplePerm() {
    +	for _, value := range Perm(4) {
    +		fmt.Println(value)
    +	}
    +
    +	// Unordered output: 4
    +	// 2
    +	// 1
    +	// 3
    +	// 0
    +}
    +
    +

    +The entire test file is presented as the example when it contains a single +example function, at least one other function, type, variable, or constant +declaration, and no test or benchmark functions. +

    +

    +See the documentation of the testing package for more information. +

    + + + +
    +
    + + + + + + + + +`)) diff --git a/vendor/golang.org/x/net/http2/h2i/README.md b/vendor/golang.org/x/net/http2/h2i/README.md new file mode 100644 index 000000000..fb5c5efb0 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2i/README.md @@ -0,0 +1,97 @@ +# h2i + +**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol' +days of telnetting to your HTTP/1.n servers? We're bringing you +back. + +Features: +- send raw HTTP/2 frames + - PING + - SETTINGS + - HEADERS + - etc +- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2 +- pretty print all received HTTP/2 frames from the peer (including HPACK decoding) +- tab completion of commands, options + +Not yet features, but soon: +- unnecessary CONTINUATION frames on short boundaries, to test peer implementations +- request bodies (DATA frames) +- send invalid frames for testing server implementations (supported by underlying Framer) + +Later: +- act like a server + +## Installation + +``` +$ go get golang.org/x/net/http2/h2i +$ h2i +``` + +## Demo + +``` +$ h2i +Usage: h2i + + -insecure + Whether to skip TLS cert validation + -nextproto string + Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14") + +$ h2i google.com +Connecting to google.com:443 ... +Connected to 74.125.224.41:443 +Negotiated protocol "h2-14" +[FrameHeader SETTINGS len=18] + [MAX_CONCURRENT_STREAMS = 100] + [INITIAL_WINDOW_SIZE = 1048576] + [MAX_FRAME_SIZE = 16384] +[FrameHeader WINDOW_UPDATE len=4] + Window-Increment = 983041 + +h2i> PING h2iSayHI +[FrameHeader PING flags=ACK len=8] + Data = "h2iSayHI" +h2i> headers +(as HTTP/1.1)> GET / HTTP/1.1 +(as HTTP/1.1)> Host: ip.appspot.com +(as HTTP/1.1)> User-Agent: h2i/brad-n-blake +(as HTTP/1.1)> +Opening Stream-ID 1: + :authority = ip.appspot.com + :method = GET + :path = / + :scheme = https + user-agent = h2i/brad-n-blake +[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77] + :status = "200" + alternate-protocol = "443:quic,p=1" + content-length = "15" + content-type = "text/html" + date = "Fri, 01 May 2015 23:06:56 GMT" + server = "Google Frontend" +[FrameHeader DATA flags=END_STREAM stream=1 len=15] + "173.164.155.78\n" +[FrameHeader PING len=8] + Data = "\x00\x00\x00\x00\x00\x00\x00\x00" +h2i> ping +[FrameHeader PING flags=ACK len=8] + Data = "h2i_ping" +h2i> ping +[FrameHeader PING flags=ACK len=8] + Data = "h2i_ping" +h2i> ping +[FrameHeader GOAWAY len=22] + Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1) + +ReadFrame: EOF +``` + +## Status + +Quick few hour hack. So much yet to do. Feel free to file issues for +bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/) +and I aren't yet accepting pull requests until things settle down. + diff --git a/vendor/golang.org/x/net/http2/h2i/h2i.go b/vendor/golang.org/x/net/http2/h2i/h2i.go new file mode 100644 index 000000000..62e57527c --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2i/h2i.go @@ -0,0 +1,522 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,!solaris + +/* +The h2i command is an interactive HTTP/2 console. 
+ +Usage: + $ h2i [flags] + +Interactive commands in the console: (all parts case-insensitive) + + ping [data] + settings ack + settings FOO=n BAR=z + headers (open a new stream by typing HTTP/1.1) +*/ +package main + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "regexp" + "strconv" + "strings" + + "golang.org/x/crypto/ssh/terminal" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +// Flags +var ( + flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.") + flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation") + flagSettings = flag.String("settings", "empty", "comma-separated list of KEY=value settings for the initial SETTINGS frame. The magic value 'empty' sends an empty initial settings frame, and the magic value 'omit' causes no initial settings frame to be sent.") + flagDial = flag.String("dial", "", "optional ip:port to dial, to connect to a host:port but use a different SNI name (including a SNI name without DNS)") +) + +type command struct { + run func(*h2i, []string) error // required + + // complete optionally specifies tokens (case-insensitive) which are + // valid for this subcommand. + complete func() []string +} + +var commands = map[string]command{ + "ping": {run: (*h2i).cmdPing}, + "settings": { + run: (*h2i).cmdSettings, + complete: func() []string { + return []string{ + "ACK", + http2.SettingHeaderTableSize.String(), + http2.SettingEnablePush.String(), + http2.SettingMaxConcurrentStreams.String(), + http2.SettingInitialWindowSize.String(), + http2.SettingMaxFrameSize.String(), + http2.SettingMaxHeaderListSize.String(), + } + }, + }, + "quit": {run: (*h2i).cmdQuit}, + "headers": {run: (*h2i).cmdHeaders}, +} + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: h2i \n\n") + flag.PrintDefaults() +} + +// withPort adds ":443" if another port isn't already present. +func withPort(host string) string { + if _, _, err := net.SplitHostPort(host); err != nil { + return net.JoinHostPort(host, "443") + } + return host +} + +// withoutPort strips the port from addr if present. +func withoutPort(addr string) string { + if h, _, err := net.SplitHostPort(addr); err == nil { + return h + } + return addr +} + +// h2i is the app's state. 
+type h2i struct { + host string + tc *tls.Conn + framer *http2.Framer + term *terminal.Terminal + + // owned by the command loop: + streamID uint32 + hbuf bytes.Buffer + henc *hpack.Encoder + + // owned by the readFrames loop: + peerSetting map[http2.SettingID]uint32 + hdec *hpack.Decoder +} + +func main() { + flag.Usage = usage + flag.Parse() + if flag.NArg() != 1 { + usage() + os.Exit(2) + } + log.SetFlags(0) + + host := flag.Arg(0) + app := &h2i{ + host: host, + peerSetting: make(map[http2.SettingID]uint32), + } + app.henc = hpack.NewEncoder(&app.hbuf) + + if err := app.Main(); err != nil { + if app.term != nil { + app.logf("%v\n", err) + } else { + fmt.Fprintf(os.Stderr, "%v\n", err) + } + os.Exit(1) + } + fmt.Fprintf(os.Stdout, "\n") +} + +func (app *h2i) Main() error { + cfg := &tls.Config{ + ServerName: withoutPort(app.host), + NextProtos: strings.Split(*flagNextProto, ","), + InsecureSkipVerify: *flagInsecure, + } + + hostAndPort := *flagDial + if hostAndPort == "" { + hostAndPort = withPort(app.host) + } + log.Printf("Connecting to %s ...", hostAndPort) + tc, err := tls.Dial("tcp", hostAndPort, cfg) + if err != nil { + return fmt.Errorf("Error dialing %s: %v", hostAndPort, err) + } + log.Printf("Connected to %v", tc.RemoteAddr()) + defer tc.Close() + + if err := tc.Handshake(); err != nil { + return fmt.Errorf("TLS handshake: %v", err) + } + if !*flagInsecure { + if err := tc.VerifyHostname(app.host); err != nil { + return fmt.Errorf("VerifyHostname: %v", err) + } + } + state := tc.ConnectionState() + log.Printf("Negotiated protocol %q", state.NegotiatedProtocol) + if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" { + return fmt.Errorf("Could not negotiate protocol mutually") + } + + if _, err := io.WriteString(tc, http2.ClientPreface); err != nil { + return err + } + + app.framer = http2.NewFramer(tc, tc) + + oldState, err := terminal.MakeRaw(int(os.Stdin.Fd())) + if err != nil { + return err + } + defer terminal.Restore(0, oldState) + + var screen = struct { + io.Reader + io.Writer + }{os.Stdin, os.Stdout} + + app.term = terminal.NewTerminal(screen, "h2i> ") + lastWord := regexp.MustCompile(`.+\W(\w+)$`) + app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) { + if key != '\t' { + return + } + if pos != len(line) { + // TODO: we're being lazy for now, only supporting tab completion at the end. + return + } + // Auto-complete for the command itself. 
+ if !strings.Contains(line, " ") { + var name string + name, _, ok = lookupCommand(line) + if !ok { + return + } + return name, len(name), true + } + _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')]) + if !ok || c.complete == nil { + return + } + if strings.HasSuffix(line, " ") { + app.logf("%s", strings.Join(c.complete(), " ")) + return line, pos, true + } + m := lastWord.FindStringSubmatch(line) + if m == nil { + return line, len(line), true + } + soFar := m[1] + var match []string + for _, cand := range c.complete() { + if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) { + continue + } + match = append(match, cand) + } + if len(match) == 0 { + return + } + if len(match) > 1 { + // TODO: auto-complete any common prefix + app.logf("%s", strings.Join(match, " ")) + return line, pos, true + } + newLine = line[:len(line)-len(soFar)] + match[0] + return newLine, len(newLine), true + + } + + errc := make(chan error, 2) + go func() { errc <- app.readFrames() }() + go func() { errc <- app.readConsole() }() + return <-errc +} + +func (app *h2i) logf(format string, args ...interface{}) { + fmt.Fprintf(app.term, format+"\r\n", args...) +} + +func (app *h2i) readConsole() error { + if s := *flagSettings; s != "omit" { + var args []string + if s != "empty" { + args = strings.Split(s, ",") + } + _, c, ok := lookupCommand("settings") + if !ok { + panic("settings command not found") + } + c.run(app, args) + } + + for { + line, err := app.term.ReadLine() + if err == io.EOF { + return nil + } + if err != nil { + return fmt.Errorf("terminal.ReadLine: %v", err) + } + f := strings.Fields(line) + if len(f) == 0 { + continue + } + cmd, args := f[0], f[1:] + if _, c, ok := lookupCommand(cmd); ok { + err = c.run(app, args) + } else { + app.logf("Unknown command %q", line) + } + if err == errExitApp { + return nil + } + if err != nil { + return err + } + } +} + +func lookupCommand(prefix string) (name string, c command, ok bool) { + prefix = strings.ToLower(prefix) + if c, ok = commands[prefix]; ok { + return prefix, c, ok + } + + for full, candidate := range commands { + if strings.HasPrefix(full, prefix) { + if c.run != nil { + return "", command{}, false // ambiguous + } + c = candidate + name = full + } + } + return name, c, c.run != nil +} + +var errExitApp = errors.New("internal sentinel error value to quit the console reading loop") + +func (a *h2i) cmdQuit(args []string) error { + if len(args) > 0 { + a.logf("the QUIT command takes no argument") + return nil + } + return errExitApp +} + +func (a *h2i) cmdSettings(args []string) error { + if len(args) == 1 && strings.EqualFold(args[0], "ACK") { + return a.framer.WriteSettingsAck() + } + var settings []http2.Setting + for _, arg := range args { + if strings.EqualFold(arg, "ACK") { + a.logf("Error: ACK must be only argument with the SETTINGS command") + return nil + } + eq := strings.Index(arg, "=") + if eq == -1 { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + sid, ok := settingByName(arg[:eq]) + if !ok { + a.logf("Error: unknown setting name %q", arg[:eq]) + return nil + } + val, err := strconv.ParseUint(arg[eq+1:], 10, 32) + if err != nil { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + settings = append(settings, http2.Setting{ + ID: sid, + Val: uint32(val), + }) + } + a.logf("Sending: %v", settings) + return a.framer.WriteSettings(settings...) 
+} + +func settingByName(name string) (http2.SettingID, bool) { + for _, sid := range [...]http2.SettingID{ + http2.SettingHeaderTableSize, + http2.SettingEnablePush, + http2.SettingMaxConcurrentStreams, + http2.SettingInitialWindowSize, + http2.SettingMaxFrameSize, + http2.SettingMaxHeaderListSize, + } { + if strings.EqualFold(sid.String(), name) { + return sid, true + } + } + return 0, false +} + +func (app *h2i) cmdPing(args []string) error { + if len(args) > 1 { + app.logf("invalid PING usage: only accepts 0 or 1 args") + return nil // nil means don't end the program + } + var data [8]byte + if len(args) == 1 { + copy(data[:], args[0]) + } else { + copy(data[:], "h2i_ping") + } + return app.framer.WritePing(false, data) +} + +func (app *h2i) cmdHeaders(args []string) error { + if len(args) > 0 { + app.logf("Error: HEADERS doesn't yet take arguments.") + // TODO: flags for restricting window size, to force CONTINUATION + // frames. + return nil + } + var h1req bytes.Buffer + app.term.SetPrompt("(as HTTP/1.1)> ") + defer app.term.SetPrompt("h2i> ") + for { + line, err := app.term.ReadLine() + if err != nil { + return err + } + h1req.WriteString(line) + h1req.WriteString("\r\n") + if line == "" { + break + } + } + req, err := http.ReadRequest(bufio.NewReader(&h1req)) + if err != nil { + app.logf("Invalid HTTP/1.1 request: %v", err) + return nil + } + if app.streamID == 0 { + app.streamID = 1 + } else { + app.streamID += 2 + } + app.logf("Opening Stream-ID %d:", app.streamID) + hbf := app.encodeHeaders(req) + if len(hbf) > 16<<10 { + app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go") + return nil + } + return app.framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: app.streamID, + BlockFragment: hbf, + EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now + EndHeaders: true, // for now + }) +} + +func (app *h2i) readFrames() error { + for { + f, err := app.framer.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame: %v", err) + } + app.logf("%v", f) + switch f := f.(type) { + case *http2.PingFrame: + app.logf(" Data = %q", f.Data) + case *http2.SettingsFrame: + f.ForeachSetting(func(s http2.Setting) error { + app.logf(" %v", s) + app.peerSetting[s.ID] = s.Val + return nil + }) + case *http2.WindowUpdateFrame: + app.logf(" Window-Increment = %v", f.Increment) + case *http2.GoAwayFrame: + app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)", f.LastStreamID, f.ErrCode, f.ErrCode) + case *http2.DataFrame: + app.logf(" %q", f.Data()) + case *http2.HeadersFrame: + if f.HasPriority() { + app.logf(" PRIORITY = %v", f.Priority) + } + if app.hdec == nil { + // TODO: if the user uses h2i to send a SETTINGS frame advertising + // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE + // and stuff here instead of using the 4k default. But for now: + tableSize := uint32(4 << 10) + app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) + } + app.hdec.Write(f.HeaderBlockFragment()) + case *http2.PushPromiseFrame: + if app.hdec == nil { + // TODO: if the user uses h2i to send a SETTINGS frame advertising + // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE + // and stuff here instead of using the 4k default. 
But for now: + tableSize := uint32(4 << 10) + app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) + } + app.hdec.Write(f.HeaderBlockFragment()) + } + } +} + +// called from readLoop +func (app *h2i) onNewHeaderField(f hpack.HeaderField) { + if f.Sensitive { + app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value) + } + app.logf(" %s = %q", f.Name, f.Value) +} + +func (app *h2i) encodeHeaders(req *http.Request) []byte { + app.hbuf.Reset() + + // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go + host := req.Host + if host == "" { + host = req.URL.Host + } + + path := req.RequestURI + if path == "" { + path = "/" + } + + app.writeHeader(":authority", host) // probably not right for all sites + app.writeHeader(":method", req.Method) + app.writeHeader(":path", path) + app.writeHeader(":scheme", "https") + + for k, vv := range req.Header { + lowKey := strings.ToLower(k) + if lowKey == "host" { + continue + } + for _, v := range vv { + app.writeHeader(lowKey, v) + } + } + return app.hbuf.Bytes() +} + +func (app *h2i) writeHeader(name, value string) { + app.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) + app.logf(" %s = %s", name, value) +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go new file mode 100644 index 000000000..c2805f6ac --- /dev/null +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -0,0 +1,78 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "net/http" + "strings" +) + +var ( + commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case + commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case +) + +func init() { + for _, v := range []string{ + "accept", + "accept-charset", + "accept-encoding", + "accept-language", + "accept-ranges", + "age", + "access-control-allow-origin", + "allow", + "authorization", + "cache-control", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-range", + "content-type", + "cookie", + "date", + "etag", + "expect", + "expires", + "from", + "host", + "if-match", + "if-modified-since", + "if-none-match", + "if-unmodified-since", + "last-modified", + "link", + "location", + "max-forwards", + "proxy-authenticate", + "proxy-authorization", + "range", + "referer", + "refresh", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "trailer", + "transfer-encoding", + "user-agent", + "vary", + "via", + "www-authenticate", + } { + chk := http.CanonicalHeaderKey(v) + commonLowerHeader[chk] = v + commonCanonHeader[v] = chk + } +} + +func lowerHeader(v string) string { + if s, ok := commonLowerHeader[v]; ok { + return s + } + return strings.ToLower(v) +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go new file mode 100644 index 000000000..1565cf270 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -0,0 +1,240 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package hpack + +import ( + "io" +) + +const ( + uint32Max = ^uint32(0) + initialHeaderTableSize = 4096 +) + +type Encoder struct { + dynTab dynamicTable + // minSize is the minimum table size set by + // SetMaxDynamicTableSize after the previous Header Table Size + // Update. + minSize uint32 + // maxSizeLimit is the maximum table size this encoder + // supports. This will protect the encoder from too large + // size. + maxSizeLimit uint32 + // tableSizeUpdate indicates whether "Header Table Size + // Update" is required. + tableSizeUpdate bool + w io.Writer + buf []byte +} + +// NewEncoder returns a new Encoder which performs HPACK encoding. An +// encoded data is written to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + minSize: uint32Max, + maxSizeLimit: initialHeaderTableSize, + tableSizeUpdate: false, + w: w, + } + e.dynTab.table.init() + e.dynTab.setMaxSize(initialHeaderTableSize) + return e +} + +// WriteField encodes f into a single Write to e's underlying Writer. +// This function may also produce bytes for "Header Table Size Update" +// if necessary. If produced, it is done before encoding f. +func (e *Encoder) WriteField(f HeaderField) error { + e.buf = e.buf[:0] + + if e.tableSizeUpdate { + e.tableSizeUpdate = false + if e.minSize < e.dynTab.maxSize { + e.buf = appendTableSize(e.buf, e.minSize) + } + e.minSize = uint32Max + e.buf = appendTableSize(e.buf, e.dynTab.maxSize) + } + + idx, nameValueMatch := e.searchTable(f) + if nameValueMatch { + e.buf = appendIndexed(e.buf, idx) + } else { + indexing := e.shouldIndex(f) + if indexing { + e.dynTab.add(f) + } + + if idx == 0 { + e.buf = appendNewName(e.buf, f, indexing) + } else { + e.buf = appendIndexedName(e.buf, f, idx, indexing) + } + } + n, err := e.w.Write(e.buf) + if err == nil && n != len(e.buf) { + err = io.ErrShortWrite + } + return err +} + +// searchTable searches f in both stable and dynamic header tables. +// The static header table is searched first. Only when there is no +// exact match for both name and value, the dynamic header table is +// then searched. If there is no match, i is 0. If both name and value +// match, i is the matched index and nameValueMatch becomes true. If +// only name matches, i points to that index and nameValueMatch +// becomes false. +func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { + i, nameValueMatch = staticTable.search(f) + if nameValueMatch { + return i, true + } + + j, nameValueMatch := e.dynTab.table.search(f) + if nameValueMatch || (i == 0 && j != 0) { + return j + uint64(staticTable.len()), nameValueMatch + } + + return i, false +} + +// SetMaxDynamicTableSize changes the dynamic header table size to v. +// The actual size is bounded by the value passed to +// SetMaxDynamicTableSizeLimit. +func (e *Encoder) SetMaxDynamicTableSize(v uint32) { + if v > e.maxSizeLimit { + v = e.maxSizeLimit + } + if v < e.minSize { + e.minSize = v + } + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) +} + +// SetMaxDynamicTableSizeLimit changes the maximum value that can be +// specified in SetMaxDynamicTableSize to v. By default, it is set to +// 4096, which is the same size of the default dynamic header table +// size described in HPACK specification. If the current maximum +// dynamic header table size is strictly greater than v, "Header Table +// Size Update" will be done in the next WriteField call and the +// maximum dynamic header table size is truncated to v. 
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { + e.maxSizeLimit = v + if e.dynTab.maxSize > v { + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) + } +} + +// shouldIndex reports whether f should be indexed. +func (e *Encoder) shouldIndex(f HeaderField) bool { + return !f.Sensitive && f.Size() <= e.dynTab.maxSize +} + +// appendIndexed appends index i, as encoded in "Indexed Header Field" +// representation, to dst and returns the extended buffer. +func appendIndexed(dst []byte, i uint64) []byte { + first := len(dst) + dst = appendVarInt(dst, 7, i) + dst[first] |= 0x80 + return dst +} + +// appendNewName appends f, as encoded in one of "Literal Header field +// - New Name" representation variants, to dst and returns the +// extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Inremental Indexing" +// representation is used. +func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { + dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) + dst = appendHpackString(dst, f.Name) + return appendHpackString(dst, f.Value) +} + +// appendIndexedName appends f and index i referring indexed name +// entry, as encoded in one of "Literal Header field - Indexed Name" +// representation variants, to dst and returns the extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Incremental Indexing" +// representation is used. +func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { + first := len(dst) + var n byte + if indexing { + n = 6 + } else { + n = 4 + } + dst = appendVarInt(dst, n, i) + dst[first] |= encodeTypeByte(indexing, f.Sensitive) + return appendHpackString(dst, f.Value) +} + +// appendTableSize appends v, as encoded in "Header Table Size Update" +// representation, to dst and returns the extended buffer. +func appendTableSize(dst []byte, v uint32) []byte { + first := len(dst) + dst = appendVarInt(dst, 5, uint64(v)) + dst[first] |= 0x20 + return dst +} + +// appendVarInt appends i, as encoded in variable integer form using n +// bit prefix, to dst and returns the extended buffer. +// +// See +// http://http2.github.io/http2-spec/compression.html#integer.representation +func appendVarInt(dst []byte, n byte, i uint64) []byte { + k := uint64((1 << n) - 1) + if i < k { + return append(dst, byte(i)) + } + dst = append(dst, byte(k)) + i -= k + for ; i >= 128; i >>= 7 { + dst = append(dst, byte(0x80|(i&0x7f))) + } + return append(dst, byte(i)) +} + +// appendHpackString appends s, as encoded in "String Literal" +// representation, to dst and returns the extended buffer. +// +// s will be encoded in Huffman codes only when it produces strictly +// shorter byte string. +func appendHpackString(dst []byte, s string) []byte { + huffmanLength := HuffmanEncodeLength(s) + if huffmanLength < uint64(len(s)) { + first := len(dst) + dst = appendVarInt(dst, 7, huffmanLength) + dst = AppendHuffmanString(dst, s) + dst[first] |= 0x80 + } else { + dst = appendVarInt(dst, 7, uint64(len(s))) + dst = append(dst, s...) + } + return dst +} + +// encodeTypeByte returns type byte. If sensitive is true, type byte +// for "Never Indexed" representation is returned. If sensitive is +// false and indexing is true, type byte for "Incremental Indexing" +// representation is returned. Otherwise, type byte for "Without +// Indexing" is returned. 
+func encodeTypeByte(indexing, sensitive bool) byte { + if sensitive { + return 0x10 + } + if indexing { + return 0x40 + } + return 0 +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode_test.go b/vendor/golang.org/x/net/http2/hpack/encode_test.go new file mode 100644 index 000000000..05f12db9c --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode_test.go @@ -0,0 +1,386 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "encoding/hex" + "fmt" + "math/rand" + "reflect" + "strings" + "testing" +) + +func TestEncoderTableSizeUpdate(t *testing.T) { + tests := []struct { + size1, size2 uint32 + wantHex string + }{ + // Should emit 2 table size updates (2048 and 4096) + {2048, 4096, "3fe10f 3fe11f 82"}, + + // Should emit 1 table size update (2048) + {16384, 2048, "3fe10f 82"}, + } + for _, tt := range tests { + var buf bytes.Buffer + e := NewEncoder(&buf) + e.SetMaxDynamicTableSize(tt.size1) + e.SetMaxDynamicTableSize(tt.size2) + if err := e.WriteField(pair(":method", "GET")); err != nil { + t.Fatal(err) + } + want := removeSpace(tt.wantHex) + if got := hex.EncodeToString(buf.Bytes()); got != want { + t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want) + } + } +} + +func TestEncoderWriteField(t *testing.T) { + var buf bytes.Buffer + e := NewEncoder(&buf) + var got []HeaderField + d := NewDecoder(4<<10, func(f HeaderField) { + got = append(got, f) + }) + + tests := []struct { + hdrs []HeaderField + }{ + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }}, + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }}, + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }}, + } + for i, tt := range tests { + buf.Reset() + got = got[:0] + for _, hf := range tt.hdrs { + if err := e.WriteField(hf); err != nil { + t.Fatal(err) + } + } + _, err := d.Write(buf.Bytes()) + if err != nil { + t.Errorf("%d. Decoder Write = %v", i, err) + } + if !reflect.DeepEqual(got, tt.hdrs) { + t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs) + } + } +} + +func TestEncoderSearchTable(t *testing.T) { + e := NewEncoder(nil) + + e.dynTab.add(pair("foo", "bar")) + e.dynTab.add(pair("blake", "miz")) + e.dynTab.add(pair(":method", "GET")) + + tests := []struct { + hf HeaderField + wantI uint64 + wantMatch bool + }{ + // Name and Value match + {pair("foo", "bar"), uint64(staticTable.len()) + 3, true}, + {pair("blake", "miz"), uint64(staticTable.len()) + 2, true}, + {pair(":method", "GET"), 2, true}, + + // Only name match because Sensitive == true. This is allowed to match + // any ":method" entry. The current implementation uses the last entry + // added in newStaticTable. + {HeaderField{":method", "GET", true}, 3, false}, + + // Only Name matches + {pair("foo", "..."), uint64(staticTable.len()) + 3, false}, + {pair("blake", "..."), uint64(staticTable.len()) + 2, false}, + // As before, this is allowed to match any ":method" entry. 
+ {pair(":method", "..."), 3, false}, + + // None match + {pair("foo-", "bar"), 0, false}, + } + for _, tt := range tests { + if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { + t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) + } + } +} + +func TestAppendVarInt(t *testing.T) { + tests := []struct { + n byte + i uint64 + want []byte + }{ + // Fits in a byte: + {1, 0, []byte{0}}, + {2, 2, []byte{2}}, + {3, 6, []byte{6}}, + {4, 14, []byte{14}}, + {5, 30, []byte{30}}, + {6, 62, []byte{62}}, + {7, 126, []byte{126}}, + {8, 254, []byte{254}}, + + // Multiple bytes: + {5, 1337, []byte{31, 154, 10}}, + } + for _, tt := range tests { + got := appendVarInt(nil, tt.n, tt.i) + if !bytes.Equal(got, tt.want) { + t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want) + } + } +} + +func TestAppendHpackString(t *testing.T) { + tests := []struct { + s, wantHex string + }{ + // Huffman encoded + {"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, + + // Not Huffman encoded + {"a", "01 61"}, + + // zero length + {"", "00"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendHpackString(nil, tt.s) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want) + } + } +} + +func TestAppendIndexed(t *testing.T) { + tests := []struct { + i uint64 + wantHex string + }{ + // 1 byte + {1, "81"}, + {126, "fe"}, + + // 2 bytes + {127, "ff00"}, + {128, "ff01"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendIndexed(nil, tt.i) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want) + } + } +} + +func TestAppendNewName(t *testing.T) { + tests := []struct { + f HeaderField + indexing bool + wantHex string + }{ + // Incremental indexing + {HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + + // Without indexing + {HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + + // Never indexed + {HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + {HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendNewName(nil, tt.f, tt.indexing) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) + } + } +} + +func TestAppendIndexedName(t *testing.T) { + tests := []struct { + f HeaderField + i uint64 + indexing bool + wantHex string + }{ + // Incremental indexing + {HeaderField{":status", "302", false}, 8, true, "48 82 6402"}, + + // Without indexing + {HeaderField{":status", "302", false}, 8, false, "08 82 6402"}, + + // Never indexed + {HeaderField{":status", "302", true}, 8, true, "18 82 6402"}, + {HeaderField{":status", "302", true}, 8, false, "18 82 6402"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) + } + } +} + +func TestAppendTableSize(t *testing.T) { + tests := []struct { + i uint32 + wantHex string + }{ + // Fits into 1 
byte + {30, "3e"}, + + // Extra byte + {31, "3f00"}, + {32, "3f01"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendTableSize(nil, tt.i) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want) + } + } +} + +func TestEncoderSetMaxDynamicTableSize(t *testing.T) { + var buf bytes.Buffer + e := NewEncoder(&buf) + tests := []struct { + v uint32 + wantUpdate bool + wantMinSize uint32 + wantMaxSize uint32 + }{ + // Set new table size to 2048 + {2048, true, 2048, 2048}, + + // Set new table size to 16384, but still limited to + // 4096 + {16384, true, 2048, 4096}, + } + for _, tt := range tests { + e.SetMaxDynamicTableSize(tt.v) + if got := e.tableSizeUpdate; tt.wantUpdate != got { + t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate) + } + if got := e.minSize; tt.wantMinSize != got { + t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize) + } + if got := e.dynTab.maxSize; tt.wantMaxSize != got { + t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize) + } + } +} + +func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) { + e := NewEncoder(nil) + // 4095 < initialHeaderTableSize means maxSize is truncated to + // 4095. + e.SetMaxDynamicTableSizeLimit(4095) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + if got, want := e.maxSizeLimit, uint32(4095); got != want { + t.Errorf("e.maxSizeLimit = %v; want %v", got, want) + } + if got, want := e.tableSizeUpdate, true; got != want { + t.Errorf("e.tableSizeUpdate = %v; want %v", got, want) + } + // maxSize will be truncated to maxSizeLimit + e.SetMaxDynamicTableSize(16384) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + // 8192 > current maxSizeLimit, so maxSize does not change. + e.SetMaxDynamicTableSizeLimit(8192) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + if got, want := e.maxSizeLimit, uint32(8192); got != want { + t.Errorf("e.maxSizeLimit = %v; want %v", got, want) + } +} + +func removeSpace(s string) string { + return strings.Replace(s, " ", "", -1) +} + +func BenchmarkEncoderSearchTable(b *testing.B) { + e := NewEncoder(nil) + + // A sample of possible header fields. + // This is not based on any actual data from HTTP/2 traces. + var possible []HeaderField + for _, f := range staticTable.ents { + if f.Value == "" { + possible = append(possible, f) + continue + } + // Generate 5 random values, except for cookie and set-cookie, + // which we know can have many values in practice. + num := 5 + if f.Name == "cookie" || f.Name == "set-cookie" { + num = 25 + } + for i := 0; i < num; i++ { + f.Value = fmt.Sprintf("%s-%d", f.Name, i) + possible = append(possible, f) + } + } + for k := 0; k < 10; k++ { + f := HeaderField{ + Name: fmt.Sprintf("x-header-%d", k), + Sensitive: rand.Int()%2 == 0, + } + for i := 0; i < 5; i++ { + f.Value = fmt.Sprintf("%s-%d", f.Name, i) + possible = append(possible, f) + } + } + + // Add a random sample to the dynamic table. This very loosely simulates + // a history of 100 requests with 20 header fields per request. + for r := 0; r < 100*20; r++ { + f := possible[rand.Int31n(int32(len(possible)))] + // Skip if this is in the staticTable verbatim. 
+ if _, has := staticTable.search(f); !has { + e.dynTab.add(f) + } + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + for _, f := range possible { + e.searchTable(f) + } + } +} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go new file mode 100644 index 000000000..176644acd --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -0,0 +1,490 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hpack implements HPACK, a compression format for +// efficiently representing HTTP header fields in the context of HTTP/2. +// +// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 +package hpack + +import ( + "bytes" + "errors" + "fmt" +) + +// A DecodingError is something the spec defines as a decoding error. +type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7541 section 4.1. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. 
+ saveBuf bytes.Buffer +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + } + d.dynTab.table.init() + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. +var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. +// A value of 0 means unlimited and is the default from NewDecoder. +func (d *Decoder) SetMaxStringLength(n int) { + d.maxStrLen = n +} + +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + +// SetEmitEnabled controls whether the emitFunc provided to NewDecoder +// should be called. The default is true. +// +// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE +// while still decoding and keeping in-sync with decoder state, but +// without doing unnecessary decompression or generating unnecessary +// garbage for header fields past the limit. +func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } + +// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder +// are currently enabled. The default is true. +func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } + +// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their +// underlying buffers for garbage reasons. + +func (d *Decoder) SetMaxDynamicTableSize(v uint32) { + d.dynTab.setMaxSize(v) +} + +// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded +// stream (via dynamic table size updates) may set the maximum size +// to. +func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { + d.dynTab.allowedMaxSize = v +} + +type dynamicTable struct { + // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + table headerFieldTable + size uint32 // in bytes + maxSize uint32 // current maxSize + allowedMaxSize uint32 // maxSize may go up to this, inclusive +} + +func (dt *dynamicTable) setMaxSize(v uint32) { + dt.maxSize = v + dt.evict() +} + +func (dt *dynamicTable) add(f HeaderField) { + dt.table.addEntry(f) + dt.size += f.Size() + dt.evict() +} + +// If we're too big, evict old stuff. +func (dt *dynamicTable) evict() { + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ + } + dt.table.evictOldest(n) +} + +func (d *Decoder) maxTableIndex() int { + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + // See Section 2.3.3. 
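+ // Index 0 is never valid. Indexes 1 through staticTable.len() address
+ // the static table; larger indexes address the dynamic table, where
+ // index staticTable.len()+1 is the most recently added entry (see
+ // TestDynamicTableAt in this patch).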
+ if i == 0 { + return + } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } + if i > uint64(d.maxTableIndex()) { + return + } + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. + dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true +} + +// Decode decodes an entire block. +// +// TODO: remove this method and make it incremental later? This is +// easier for debugging now. +func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + var hf []HeaderField + saveFunc := d.emit + defer func() { d.emit = saveFunc }() + d.emit = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.Write(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return DecodingError{errors.New("truncated headers")} + } + return nil +} + +func (d *Decoder) Write(p []byte) (n int, err error) { + if len(p) == 0 { + // Prevent state machine CPU attacks (making us redo + // work up to the point of finding out we don't have + // enough data) + return + } + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. + if d.saveBuf.Len() == 0 { + d.buf = p + } else { + d.saveBuf.Write(p) + d.buf = d.saveBuf.Bytes() + d.saveBuf.Reset() + } + + for len(d.buf) > 0 { + err = d.parseHeaderFieldRepr() + if err == errNeedMore { + // Extra paranoia, making sure saveBuf won't + // get too large. All the varint and string + // reading code earlier should already catch + // overlong things and return ErrStringLength, + // but keep this as a last resort. + const varIntOverhead = 8 // conservative + if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { + return 0, ErrStringLength + } + d.saveBuf.Write(d.buf) + return len(p), nil + } + if err != nil { + break + } + } + return len(p), err +} + +// errNeedMore is an internal sentinel error value that means the +// buffer is truncated and we need to read more data before we can +// continue parsing. +var errNeedMore = errors.New("need more data") + +type indexType int + +const ( + indexedTrue indexType = iota + indexedFalse + indexedNever +) + +func (v indexType) indexed() bool { return v == indexedTrue } +func (v indexType) sensitive() bool { return v == indexedNever } + +// returns errNeedMore if there isn't enough data available. +// any other error is fatal. +// consumes d.buf iff it returns nil. +// precondition: must be called with len(d.buf) > 0 +func (d *Decoder) parseHeaderFieldRepr() error { + b := d.buf[0] + switch { + case b&128 != 0: + // Indexed representation. + // High bit set? 
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + return d.parseFieldIndexed() + case b&192 == 64: + // 6.2.1 Literal Header Field with Incremental Indexing + // 0b10xxxxxx: top two bits are 10 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + return d.parseFieldLiteral(6, indexedTrue) + case b&240 == 0: + // 6.2.2 Literal Header Field without Indexing + // 0b0000xxxx: top four bits are 0000 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + return d.parseFieldLiteral(4, indexedFalse) + case b&240 == 16: + // 6.2.3 Literal Header Field never Indexed + // 0b0001xxxx: top four bits are 0001 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + return d.parseFieldLiteral(4, indexedNever) + case b&224 == 32: + // 6.3 Dynamic Table Size Update + // Top three bits are '001'. + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 + return d.parseDynamicTableSizeUpdate() + } + + return DecodingError{errors.New("invalid encoding")} +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldIndexed() error { + buf := d.buf + idx, buf, err := readVarInt(7, buf) + if err != nil { + return err + } + hf, ok := d.at(idx) + if !ok { + return DecodingError{InvalidIndexError(idx)} + } + d.buf = buf + return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { + buf := d.buf + nameIdx, buf, err := readVarInt(n, buf) + if err != nil { + return err + } + + var hf HeaderField + wantStr := d.emitEnabled || it.indexed() + if nameIdx > 0 { + ihf, ok := d.at(nameIdx) + if !ok { + return DecodingError{InvalidIndexError(nameIdx)} + } + hf.Name = ihf.Name + } else { + hf.Name, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + } + hf.Value, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + d.buf = buf + if it.indexed() { + d.dynTab.add(hf) + } + hf.Sensitive = it.sensitive() + return d.callEmit(hf) +} + +func (d *Decoder) callEmit(hf HeaderField) error { + if d.maxStrLen != 0 { + if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { + return ErrStringLength + } + } + if d.emitEnabled { + d.emit(hf) + } + return nil +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseDynamicTableSizeUpdate() error { + buf := d.buf + size, buf, err := readVarInt(5, buf) + if err != nil { + return err + } + if size > uint64(d.dynTab.allowedMaxSize) { + return DecodingError{errors.New("dynamic table size update too large")} + } + d.dynTab.setMaxSize(uint32(size)) + d.buf = buf + return nil +} + +var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} + +// readVarInt reads an unsigned variable length integer off the +// beginning of p. n is the parameter as described in +// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. +// +// n must always be between 1 and 8. +// +// The returned remain buffer is either a smaller suffix of p, or err != nil. +// The error is errNeedMore if p doesn't contain a complete integer. 
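+//
+// As a worked example (RFC 7541, Section C.1.2): encoding 1337 with a
+// 5-bit prefix saturates the prefix at 31 (0x1f) and then emits the
+// remaining 1306 in 7-bit groups, least significant group first: 0x9a,
+// then 0x0a. Decoding []byte{31, 154, 10} with n=5 therefore yields
+// i=1337 with 3 bytes consumed, matching the TestAppendVarInt vector
+// in encode_test.go.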
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+ if n < 1 || n > 8 {
+ panic("bad n")
+ }
+ if len(p) == 0 {
+ return 0, p, errNeedMore
+ }
+ i = uint64(p[0])
+ if n < 8 {
+ i &= (1 << uint64(n)) - 1
+ }
+ if i < (1<<uint64(n))-1 {
+ return i, p[1:], nil
+ }
+
+ origP := p
+ p = p[1:]
+ var m uint64
+ for len(p) > 0 {
+ b := p[0]
+ p = p[1:]
+ i += uint64(b&127) << m
+ if b&128 == 0 {
+ return i, p, nil
+ }
+ m += 7
+ if m >= 63 { // TODO: proper overflow check. making this up.
+ return 0, origP, errVarintOverflow
+ }
+ }
+ return 0, origP, errNeedMore
+}
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway. This does mean that huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+ if len(p) == 0 {
+ return "", p, errNeedMore
+ }
+ isHuff := p[0]&128 != 0
+ strLen, p, err := readVarInt(7, p)
+ if err != nil {
+ return "", p, err
+ }
+ if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+ return "", nil, ErrStringLength
+ }
+ if uint64(len(p)) < strLen {
+ return "", p, errNeedMore
+ }
+ if !isHuff {
+ if wantStr {
+ s = string(p[:strLen])
+ }
+ return s, p[strLen:], nil
+ }
+
+ if wantStr {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset() // don't trust others
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+ buf.Reset()
+ return "", nil, err
+ }
+ s = buf.String()
+ buf.Reset() // be nice to GC
+ }
+ return s, p[strLen:], nil
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack_test.go b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
new file mode 100644
index 000000000..bc7f47678
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
@@ -0,0 +1,722 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package hpack + +import ( + "bytes" + "encoding/hex" + "fmt" + "math/rand" + "reflect" + "strings" + "testing" + "time" +) + +func (d *Decoder) mustAt(idx int) HeaderField { + if hf, ok := d.at(uint64(idx)); !ok { + panic(fmt.Sprintf("bogus index %d", idx)) + } else { + return hf + } +} + +func TestDynamicTableAt(t *testing.T) { + d := NewDecoder(4096, nil) + at := d.mustAt + if got, want := at(2), (pair(":method", "GET")); got != want { + t.Errorf("at(2) = %v; want %v", got, want) + } + d.dynTab.add(pair("foo", "bar")) + d.dynTab.add(pair("blake", "miz")) + if got, want := at(staticTable.len()+1), (pair("blake", "miz")); got != want { + t.Errorf("at(dyn 1) = %v; want %v", got, want) + } + if got, want := at(staticTable.len()+2), (pair("foo", "bar")); got != want { + t.Errorf("at(dyn 2) = %v; want %v", got, want) + } + if got, want := at(3), (pair(":method", "POST")); got != want { + t.Errorf("at(3) = %v; want %v", got, want) + } +} + +func TestDynamicTableSizeEvict(t *testing.T) { + d := NewDecoder(4096, nil) + if want := uint32(0); d.dynTab.size != want { + t.Fatalf("size = %d; want %d", d.dynTab.size, want) + } + add := d.dynTab.add + add(pair("blake", "eats pizza")) + if want := uint32(15 + 32); d.dynTab.size != want { + t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want) + } + add(pair("foo", "bar")) + if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want { + t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want) + } + d.dynTab.setMaxSize(15 + 32 + 1 /* slop */) + if want := uint32(6 + 32); d.dynTab.size != want { + t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want) + } + if got, want := d.mustAt(staticTable.len()+1), (pair("foo", "bar")); got != want { + t.Errorf("at(dyn 1) = %v; want %v", got, want) + } + add(pair("long", strings.Repeat("x", 500))) + if want := uint32(0); d.dynTab.size != want { + t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want) + } +} + +func TestDecoderDecode(t *testing.T) { + tests := []struct { + name string + in []byte + want []HeaderField + wantDynTab []HeaderField // newest entry first + }{ + // C.2.1 Literal Header Field with Indexing + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1 + {"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"), + []HeaderField{pair("custom-key", "custom-header")}, + []HeaderField{pair("custom-key", "custom-header")}, + }, + + // C.2.2 Literal Header Field without Indexing + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2 + {"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"), + []HeaderField{pair(":path", "/sample/path")}, + []HeaderField{}}, + + // C.2.3 Literal Header Field never Indexed + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3 + {"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"), + []HeaderField{{"password", "secret", true}}, + []HeaderField{}}, + + // C.2.4 Indexed Header Field + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4 + {"C.2.4", []byte("\x82"), + []HeaderField{pair(":method", "GET")}, + []HeaderField{}}, + } + for _, tt := range tests { + d := NewDecoder(4096, nil) + hf, err := d.DecodeFull(tt.in) + if err != nil { + t.Errorf("%s: %v", tt.name, err) + continue + } + if !reflect.DeepEqual(hf, tt.want) { + t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want) + } + gotDynTab := d.dynTab.reverseCopy() + if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) { + t.Errorf("%s: dynamic table after = %v; want %v", 
tt.name, gotDynTab, tt.wantDynTab) + } + } +} + +func (dt *dynamicTable) reverseCopy() (hf []HeaderField) { + hf = make([]HeaderField, len(dt.table.ents)) + for i := range hf { + hf[i] = dt.table.ents[len(dt.table.ents)-1-i] + } + return +} + +type encAndWant struct { + enc []byte + want []HeaderField + wantDynTab []HeaderField + wantDynSize uint32 +} + +// C.3 Request Examples without Huffman Coding +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3 +func TestDecodeC3_NoHuffman(t *testing.T) { + testDecodeSeries(t, 4096, []encAndWant{ + {dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }, + []HeaderField{ + pair(":authority", "www.example.com"), + }, + 57, + }, + {dehex("8286 84be 5808 6e6f 2d63 6163 6865"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }, + []HeaderField{ + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 110, + }, + {dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }, + []HeaderField{ + pair("custom-key", "custom-value"), + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 164, + }, + }) +} + +// C.4 Request Examples with Huffman Coding +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4 +func TestDecodeC4_Huffman(t *testing.T) { + testDecodeSeries(t, 4096, []encAndWant{ + {dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }, + []HeaderField{ + pair(":authority", "www.example.com"), + }, + 57, + }, + {dehex("8286 84be 5886 a8eb 1064 9cbf"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }, + []HeaderField{ + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 110, + }, + {dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }, + []HeaderField{ + pair("custom-key", "custom-value"), + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 164, + }, + }) +} + +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5 +// "This section shows several consecutive header lists, corresponding +// to HTTP responses, on the same connection. The HTTP/2 setting +// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 +// octets, causing some evictions to occur." 
+func TestDecodeC5_ResponsesNoHuff(t *testing.T) { + testDecodeSeries(t, 256, []encAndWant{ + {dehex(` +4803 3330 3258 0770 7269 7661 7465 611d +4d6f 6e2c 2032 3120 4f63 7420 3230 3133 +2032 303a 3133 3a32 3120 474d 546e 1768 +7474 7073 3a2f 2f77 7777 2e65 7861 6d70 +6c65 2e63 6f6d +`), + []HeaderField{ + pair(":status", "302"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + pair(":status", "302"), + }, + 222, + }, + {dehex("4803 3330 37c1 c0bf"), + []HeaderField{ + pair(":status", "307"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair(":status", "307"), + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + }, + 222, + }, + {dehex(` +88c1 611d 4d6f 6e2c 2032 3120 4f63 7420 +3230 3133 2032 303a 3133 3a32 3220 474d +54c0 5a04 677a 6970 7738 666f 6f3d 4153 +444a 4b48 514b 425a 584f 5157 454f 5049 +5541 5851 5745 4f49 553b 206d 6178 2d61 +6765 3d33 3630 303b 2076 6572 7369 6f6e +3d31 +`), + []HeaderField{ + pair(":status", "200"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + pair("location", "https://www.example.com"), + pair("content-encoding", "gzip"), + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + }, + []HeaderField{ + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + pair("content-encoding", "gzip"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + }, + 215, + }, + }) +} + +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6 +// "This section shows the same examples as the previous section, but +// using Huffman encoding for the literal values. The HTTP/2 setting +// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 +// octets, causing some evictions to occur. The eviction mechanism +// uses the length of the decoded literal values, so the same +// evictions occurs as in the previous section." 
+func TestDecodeC6_ResponsesHuffman(t *testing.T) { + testDecodeSeries(t, 256, []encAndWant{ + {dehex(` +4882 6402 5885 aec3 771a 4b61 96d0 7abe +9410 54d4 44a8 2005 9504 0b81 66e0 82a6 +2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8 +e9ae 82ae 43d3 +`), + []HeaderField{ + pair(":status", "302"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + pair(":status", "302"), + }, + 222, + }, + {dehex("4883 640e ffc1 c0bf"), + []HeaderField{ + pair(":status", "307"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair(":status", "307"), + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + }, + 222, + }, + {dehex(` +88c1 6196 d07a be94 1054 d444 a820 0595 +040b 8166 e084 a62d 1bff c05a 839b d9ab +77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b +3960 d5af 2708 7f36 72c1 ab27 0fb5 291f +9587 3160 65c0 03ed 4ee5 b106 3d50 07 +`), + []HeaderField{ + pair(":status", "200"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + pair("location", "https://www.example.com"), + pair("content-encoding", "gzip"), + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + }, + []HeaderField{ + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + pair("content-encoding", "gzip"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + }, + 215, + }, + }) +} + +func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) { + d := NewDecoder(size, nil) + for i, step := range steps { + hf, err := d.DecodeFull(step.enc) + if err != nil { + t.Fatalf("Error at step index %d: %v", i, err) + } + if !reflect.DeepEqual(hf, step.want) { + t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want) + } + gotDynTab := d.dynTab.reverseCopy() + if !reflect.DeepEqual(gotDynTab, step.wantDynTab) { + t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab) + } + if d.dynTab.size != step.wantDynSize { + t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize) + } + } +} + +func TestHuffmanDecodeExcessPadding(t *testing.T) { + tests := [][]byte{ + {0xff}, // Padding Exceeds 7 bits + {0x1f, 0xff}, // {"a", 1 byte excess padding} + {0x1f, 0xff, 0xff}, // {"a", 2 byte excess padding} + {0x1f, 0xff, 0xff, 0xff}, // {"a", 3 byte excess padding} + {0xff, 0x9f, 0xff, 0xff, 0xff}, // {"a", 29 bit excess padding} + {'R', 0xbc, '0', 0xff, 0xff, 0xff, 0xff}, // Padding ends on partial symbol. 
+ } + for i, in := range tests { + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("test-%d: decode(%q) = %v; want ErrInvalidHuffman", i, in, err) + } + } +} + +func TestHuffmanDecodeEOS(t *testing.T) { + in := []byte{0xff, 0xff, 0xff, 0xff, 0xfc} // {EOS, "?"} + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("error = %v; want ErrInvalidHuffman", err) + } +} + +func TestHuffmanDecodeMaxLengthOnTrailingByte(t *testing.T) { + in := []byte{0x00, 0x01} // {"0", "0", "0"} + var buf bytes.Buffer + if err := huffmanDecode(&buf, 2, in); err != ErrStringLength { + t.Errorf("error = %v; want ErrStringLength", err) + } +} + +func TestHuffmanDecodeCorruptPadding(t *testing.T) { + in := []byte{0x00} + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("error = %v; want ErrInvalidHuffman", err) + } +} + +func TestHuffmanDecode(t *testing.T) { + tests := []struct { + inHex, want string + }{ + {"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"}, + {"a8eb 1064 9cbf", "no-cache"}, + {"25a8 49e9 5ba9 7d7f", "custom-key"}, + {"25a8 49e9 5bb8 e8b4 bf", "custom-value"}, + {"6402", "302"}, + {"aec3 771a 4b", "private"}, + {"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"}, + {"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"}, + {"9bd9 ab", "gzip"}, + {"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07", + "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"}, + } + for i, tt := range tests { + var buf bytes.Buffer + in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1)) + if err != nil { + t.Errorf("%d. hex input error: %v", i, err) + continue + } + if _, err := HuffmanDecode(&buf, in); err != nil { + t.Errorf("%d. decode error: %v", i, err) + continue + } + if got := buf.String(); tt.want != got { + t.Errorf("%d. decode = %q; want %q", i, got, tt.want) + } + } +} + +func TestAppendHuffmanString(t *testing.T) { + tests := []struct { + in, want string + }{ + {"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, + {"no-cache", "a8eb 1064 9cbf"}, + {"custom-key", "25a8 49e9 5ba9 7d7f"}, + {"custom-value", "25a8 49e9 5bb8 e8b4 bf"}, + {"302", "6402"}, + {"private", "aec3 771a 4b"}, + {"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"}, + {"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"}, + {"gzip", "9bd9 ab"}, + {"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", + "94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"}, + } + for i, tt := range tests { + buf := []byte{} + want := strings.Replace(tt.want, " ", "", -1) + buf = AppendHuffmanString(buf, tt.in) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("%d. 
encode = %q; want %q", i, got, want) + } + } +} + +func TestHuffmanMaxStrLen(t *testing.T) { + const msg = "Some string" + huff := AppendHuffmanString(nil, msg) + + testGood := func(max int) { + var out bytes.Buffer + if err := huffmanDecode(&out, max, huff); err != nil { + t.Errorf("For maxLen=%d, unexpected error: %v", max, err) + } + if out.String() != msg { + t.Errorf("For maxLen=%d, out = %q; want %q", max, out.String(), msg) + } + } + testGood(0) + testGood(len(msg)) + testGood(len(msg) + 1) + + var out bytes.Buffer + if err := huffmanDecode(&out, len(msg)-1, huff); err != ErrStringLength { + t.Errorf("err = %v; want ErrStringLength", err) + } +} + +func TestHuffmanRoundtripStress(t *testing.T) { + const Len = 50 // of uncompressed string + input := make([]byte, Len) + var output bytes.Buffer + var huff []byte + + n := 5000 + if testing.Short() { + n = 100 + } + seed := time.Now().UnixNano() + t.Logf("Seed = %v", seed) + src := rand.New(rand.NewSource(seed)) + var encSize int64 + for i := 0; i < n; i++ { + for l := range input { + input[l] = byte(src.Intn(256)) + } + huff = AppendHuffmanString(huff[:0], string(input)) + encSize += int64(len(huff)) + output.Reset() + if err := huffmanDecode(&output, 0, huff); err != nil { + t.Errorf("Failed to decode %q -> %q -> error %v", input, huff, err) + continue + } + if !bytes.Equal(output.Bytes(), input) { + t.Errorf("Roundtrip failure on %q -> %q -> %q", input, huff, output.Bytes()) + } + } + t.Logf("Compressed size of original: %0.02f%% (%v -> %v)", 100*(float64(encSize)/(Len*float64(n))), Len*n, encSize) +} + +func TestHuffmanDecodeFuzz(t *testing.T) { + const Len = 50 // of compressed + var buf, zbuf bytes.Buffer + + n := 5000 + if testing.Short() { + n = 100 + } + seed := time.Now().UnixNano() + t.Logf("Seed = %v", seed) + src := rand.New(rand.NewSource(seed)) + numFail := 0 + for i := 0; i < n; i++ { + zbuf.Reset() + if i == 0 { + // Start with at least one invalid one. 
+ zbuf.WriteString("00\x91\xff\xff\xff\xff\xc8") + } else { + for l := 0; l < Len; l++ { + zbuf.WriteByte(byte(src.Intn(256))) + } + } + + buf.Reset() + if err := huffmanDecode(&buf, 0, zbuf.Bytes()); err != nil { + if err == ErrInvalidHuffman { + numFail++ + continue + } + t.Errorf("Failed to decode %q: %v", zbuf.Bytes(), err) + continue + } + } + t.Logf("%0.02f%% are invalid (%d / %d)", 100*float64(numFail)/float64(n), numFail, n) + if numFail < 1 { + t.Error("expected at least one invalid huffman encoding (test starts with one)") + } +} + +func TestReadVarInt(t *testing.T) { + type res struct { + i uint64 + consumed int + err error + } + tests := []struct { + n byte + p []byte + want res + }{ + // Fits in a byte: + {1, []byte{0}, res{0, 1, nil}}, + {2, []byte{2}, res{2, 1, nil}}, + {3, []byte{6}, res{6, 1, nil}}, + {4, []byte{14}, res{14, 1, nil}}, + {5, []byte{30}, res{30, 1, nil}}, + {6, []byte{62}, res{62, 1, nil}}, + {7, []byte{126}, res{126, 1, nil}}, + {8, []byte{254}, res{254, 1, nil}}, + + // Doesn't fit in a byte: + {1, []byte{1}, res{0, 0, errNeedMore}}, + {2, []byte{3}, res{0, 0, errNeedMore}}, + {3, []byte{7}, res{0, 0, errNeedMore}}, + {4, []byte{15}, res{0, 0, errNeedMore}}, + {5, []byte{31}, res{0, 0, errNeedMore}}, + {6, []byte{63}, res{0, 0, errNeedMore}}, + {7, []byte{127}, res{0, 0, errNeedMore}}, + {8, []byte{255}, res{0, 0, errNeedMore}}, + + // Ignoring top bits: + {5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111 + {5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100 + {5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101 + + // Extra byte: + {5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte + + // Short a byte: + {5, []byte{191, 154}, res{0, 0, errNeedMore}}, + + // integer overflow: + {1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}}, + } + for _, tt := range tests { + i, remain, err := readVarInt(tt.n, tt.p) + consumed := len(tt.p) - len(remain) + got := res{i, consumed, err} + if got != tt.want { + t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want) + } + } +} + +// Fuzz crash, originally reported at https://github.com/bradfitz/http2/issues/56 +func TestHuffmanFuzzCrash(t *testing.T) { + got, err := HuffmanDecodeToString([]byte("00\x91\xff\xff\xff\xff\xc8")) + if got != "" { + t.Errorf("Got %q; want empty string", got) + } + if err != ErrInvalidHuffman { + t.Errorf("Err = %v; want ErrInvalidHuffman", err) + } +} + +func pair(name, value string) HeaderField { + return HeaderField{Name: name, Value: value} +} + +func dehex(s string) []byte { + s = strings.Replace(s, " ", "", -1) + s = strings.Replace(s, "\n", "", -1) + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +func TestEmitEnabled(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) + enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) + + numCallback := 0 + var dec *Decoder + dec = NewDecoder(8<<20, func(HeaderField) { + numCallback++ + dec.SetEmitEnabled(false) + }) + if !dec.EmitEnabled() { + t.Errorf("initial emit enabled = false; want true") + } + if _, err := dec.Write(buf.Bytes()); err != nil { + t.Error(err) + } + if numCallback != 1 { + t.Errorf("num callbacks = %d; want 1", numCallback) + } + if dec.EmitEnabled() { + t.Errorf("emit enabled = true; want false") + } +} + +func TestSaveBufLimit(t *testing.T) { 
+ const maxStr = 1 << 10 + var got []HeaderField + dec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) { + got = append(got, hf) + }) + dec.SetMaxStringLength(maxStr) + var frag []byte + frag = append(frag[:0], encodeTypeByte(false, false)) + frag = appendVarInt(frag, 7, 3) + frag = append(frag, "foo"...) + frag = appendVarInt(frag, 7, 3) + frag = append(frag, "bar"...) + + if _, err := dec.Write(frag); err != nil { + t.Fatal(err) + } + + want := []HeaderField{{Name: "foo", Value: "bar"}} + if !reflect.DeepEqual(got, want) { + t.Errorf("After small writes, got %v; want %v", got, want) + } + + frag = append(frag[:0], encodeTypeByte(false, false)) + frag = appendVarInt(frag, 7, maxStr*3) + frag = append(frag, make([]byte, maxStr*3)...) + + _, err := dec.Write(frag) + if err != ErrStringLength { + t.Fatalf("Write error = %v; want ErrStringLength", err) + } +} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go new file mode 100644 index 000000000..8850e3946 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -0,0 +1,212 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "errors" + "io" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// HuffmanDecode decodes the string in v and writes the expanded +// result to w, returning the number of bytes written to w and the +// Write call's return value. At most one Write call is made. +func HuffmanDecode(w io.Writer, v []byte) (int, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +// HuffmanDecodeToString decodes the string in v. +func HuffmanDecodeToString(v []byte) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return "", err + } + return buf.String(), nil +} + +// ErrInvalidHuffman is returned for errors found decoding +// Huffman-encoded strings. +var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") + +// huffmanDecode decodes v to buf. +// If maxLen is greater than 0, attempts to write more to buf than +// maxLen bytes will return ErrStringLength. +func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + n := rootHuffmanNode + // cur is the bit buffer that has not been fed into n. + // cbits is the number of low order bits in cur that are valid. + // sbits is the number of bits of the symbol prefix being decoded. 
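+ // The decoder walks a 256-way trie rooted at rootHuffmanNode: each
+ // step indexes a child with the next 8 bits of the bit buffer. An
+ // internal node consumes all 8 bits; a leaf emits its symbol,
+ // consumes only codeLen bits, and decoding restarts at the root.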
+ cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+ for _, b := range v {
+ cur = cur<<8 | uint(b)
+ cbits += 8
+ sbits += 8
+ for cbits >= 8 {
+ idx := byte(cur >> (cbits - 8))
+ n = n.children[idx]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children == nil {
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ cbits -= n.codeLen
+ n = rootHuffmanNode
+ sbits = cbits
+ } else {
+ cbits -= 8
+ }
+ }
+ }
+ for cbits > 0 {
+ n = n.children[byte(cur<<(8-cbits))]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children != nil || n.codeLen > cbits {
+ break
+ }
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ cbits -= n.codeLen
+ n = rootHuffmanNode
+ sbits = cbits
+ }
+ if sbits > 7 {
+ // Either there was an incomplete symbol, or overlong padding.
+ // Both are decoding errors per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+ if mask := uint(1<<cbits - 1); cur&mask != mask {
+ // Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+
+ return nil
+}
+
+type node struct {
+ // children is non-nil for internal nodes
+ children []*node
+
+ // The following are only valid if children is nil:
+ codeLen uint8 // length of the code in bits
+ sym byte // the symbol
+}
+
+func newInternalNode() *node {
+ return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+ if len(huffmanCodes) != 256 {
+ panic("unexpected size")
+ }
+ for i, code := range huffmanCodes {
+ addDecoderNode(byte(i), code, huffmanCodeLen[i])
+ }
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+ cur := rootHuffmanNode
+ for codeLen > 8 {
+ codeLen -= 8
+ i := uint8(code >> codeLen)
+ if cur.children[i] == nil {
+ cur.children[i] = newInternalNode()
+ }
+ cur = cur.children[i]
+ }
+ shift := 8 - codeLen
+ start, end := int(uint8(code<<shift)), int(1<<shift)
+ for i := start; i < start+end; i++ {
+ cur.children[i] = &node{sym: sym, codeLen: codeLen}
+ }
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+ rembits := uint8(8)
+
+ for i := 0; i < len(s); i++ {
+ if rembits == 8 {
+ dst = append(dst, 0)
+ }
+ dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+ }
+
+ if rembits < 8 {
+ // special EOS symbol
+ code := uint32(0x3fffffff)
+ nbits := uint8(30)
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+ }
+
+ return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is round up to byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+ n := uint64(0)
+ for i := 0; i < len(s); i++ {
+ n += uint64(huffmanCodeLen[s[i]])
+ }
+ return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst is given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+ code := huffmanCodes[c]
+ nbits := huffmanCodeLen[c]
+
+ for {
+ if rembits > nbits {
+ t := uint8(code << (rembits - nbits))
+ dst[len(dst)-1] |= t
+ rembits -= nbits
+ break
+ }
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+
+ nbits -= rembits
+ rembits = 8
+
+ if nbits == 0 {
+ break
+ }
+
+ dst = append(dst, 0)
+ }
+
+ return dst, rembits
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 000000000..a66cfbea6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,479 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "fmt"
+)
+
+// headerFieldTable implements a list of HeaderFields.
+// This is used to implement the static and dynamic tables.
+type headerFieldTable struct {
+ // For static tables, entries are never evicted.
+ //
+ // For dynamic tables, entries are evicted from ents[0] and added to the end.
+ // Each entry has a unique id that starts at one and increments for each
+ // entry that is added. This unique id is stable across evictions, meaning
+ // it can be used as a pointer to a specific entry. As in hpack, unique ids
+ // are 1-based. The unique id for ents[k] is k + evictCount + 1.
+ //
+ // Zero is not a valid unique id.
+ //
+ // evictCount should not overflow in any remotely practical situation. In
+ // practice, we will have one dynamic table per HTTP/2 connection.
If we + // assume a very powerful server that handles 1M QPS per connection and each + // request adds (then evicts) 100 entries from the table, it would still take + // 2M years for evictCount to overflow. + ents []HeaderField + evictCount uint64 + + // byName maps a HeaderField name to the unique id of the newest entry with + // the same name. See above for a definition of "unique id". + byName map[string]uint64 + + // byNameValue maps a HeaderField name/value pair to the unique id of the newest + // entry with the same name and value. See above for a definition of "unique id". + byNameValue map[pairNameValue]uint64 +} + +type pairNameValue struct { + name, value string +} + +func (t *headerFieldTable) init() { + t.byName = make(map[string]uint64) + t.byNameValue = make(map[pairNameValue]uint64) +} + +// len reports the number of entries in the table. +func (t *headerFieldTable) len() int { + return len(t.ents) +} + +// addEntry adds a new entry. +func (t *headerFieldTable) addEntry(f HeaderField) { + id := uint64(t.len()) + t.evictCount + 1 + t.byName[f.Name] = id + t.byNameValue[pairNameValue{f.Name, f.Value}] = id + t.ents = append(t.ents, f) +} + +// evictOldest evicts the n oldest entries in the table. +func (t *headerFieldTable) evictOldest(n int) { + if n > t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + delete(t.byName, f.Name) + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + delete(t.byNameValue, p) + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. 
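+//
+// For example, after three evictions (evictCount == 3) a table holding
+// two entries contains ids 4 and 5; in a dynamic table the newest entry
+// (id 5) maps to HPACK index 1 and the older one (id 4) to index 2.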
+func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "max-forwards"}, + {Name: "proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := range staticTableEntries[:] { + t.addEntry(e) + } + return t +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 
0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/golang.org/x/net/http2/hpack/tables_test.go b/vendor/golang.org/x/net/http2/hpack/tables_test.go new file mode 100644 index 000000000..d963f3635 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/tables_test.go @@ -0,0 +1,214 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package hpack + +import ( + "bufio" + "regexp" + "strconv" + "strings" + "testing" +) + +func TestHeaderFieldTable(t *testing.T) { + table := &headerFieldTable{} + table.init() + table.addEntry(pair("key1", "value1-1")) + table.addEntry(pair("key2", "value2-1")) + table.addEntry(pair("key1", "value1-2")) + table.addEntry(pair("key3", "value3-1")) + table.addEntry(pair("key4", "value4-1")) + table.addEntry(pair("key2", "value2-2")) + + // Tests will be run twice: once before evicting anything, and + // again after evicting the three oldest entries. + tests := []struct { + f HeaderField + beforeWantStaticI uint64 + beforeWantMatch bool + afterWantStaticI uint64 + afterWantMatch bool + }{ + {HeaderField{"key1", "value1-1", false}, 1, true, 0, false}, + {HeaderField{"key1", "value1-2", false}, 3, true, 0, false}, + {HeaderField{"key1", "value1-3", false}, 3, false, 0, false}, + {HeaderField{"key2", "value2-1", false}, 2, true, 3, false}, + {HeaderField{"key2", "value2-2", false}, 6, true, 3, true}, + {HeaderField{"key2", "value2-3", false}, 6, false, 3, false}, + {HeaderField{"key4", "value4-1", false}, 5, true, 2, true}, + // Name match only, because sensitive. + {HeaderField{"key4", "value4-1", true}, 5, false, 2, false}, + // Key not found. + {HeaderField{"key5", "value5-x", false}, 0, false, 0, false}, + } + + staticToDynamic := func(i uint64) uint64 { + if i == 0 { + return 0 + } + return uint64(table.len()) - i + 1 // dynamic is the reversed table + } + + searchStatic := func(f HeaderField) (uint64, bool) { + old := staticTable + staticTable = table + defer func() { staticTable = old }() + return staticTable.search(f) + } + + searchDynamic := func(f HeaderField) (uint64, bool) { + return table.search(f) + } + + for _, test := range tests { + gotI, gotMatch := searchStatic(test.f) + if wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("before evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + gotI, gotMatch = searchDynamic(test.f) + wantDynamicI := staticToDynamic(test.beforeWantStaticI) + if wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("before evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + } + + table.evictOldest(3) + + for _, test := range tests { + gotI, gotMatch := searchStatic(test.f) + if wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("after evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + gotI, gotMatch = searchDynamic(test.f) + wantDynamicI := staticToDynamic(test.afterWantStaticI) + if wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("after evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + } +} + +func TestHeaderFieldTable_LookupMapEviction(t *testing.T) { + table := &headerFieldTable{} + table.init() + table.addEntry(pair("key1", "value1-1")) + table.addEntry(pair("key2", "value2-1")) + table.addEntry(pair("key1", "value1-2")) + table.addEntry(pair("key3", "value3-1")) + table.addEntry(pair("key4", "value4-1")) + table.addEntry(pair("key2", "value2-2")) + + // evict all pairs + table.evictOldest(table.len()) + + if l := table.len(); l > 0 { + t.Errorf("table.len() = %d, want 0", l) + } + + if l := len(table.byName); l > 0 { + 
t.Errorf("len(table.byName) = %d, want 0", l) + } + + if l := len(table.byNameValue); l > 0 { + t.Errorf("len(table.byNameValue) = %d, want 0", l) + } +} + +func TestStaticTable(t *testing.T) { + fromSpec := ` + +-------+-----------------------------+---------------+ + | 1 | :authority | | + | 2 | :method | GET | + | 3 | :method | POST | + | 4 | :path | / | + | 5 | :path | /index.html | + | 6 | :scheme | http | + | 7 | :scheme | https | + | 8 | :status | 200 | + | 9 | :status | 204 | + | 10 | :status | 206 | + | 11 | :status | 304 | + | 12 | :status | 400 | + | 13 | :status | 404 | + | 14 | :status | 500 | + | 15 | accept-charset | | + | 16 | accept-encoding | gzip, deflate | + | 17 | accept-language | | + | 18 | accept-ranges | | + | 19 | accept | | + | 20 | access-control-allow-origin | | + | 21 | age | | + | 22 | allow | | + | 23 | authorization | | + | 24 | cache-control | | + | 25 | content-disposition | | + | 26 | content-encoding | | + | 27 | content-language | | + | 28 | content-length | | + | 29 | content-location | | + | 30 | content-range | | + | 31 | content-type | | + | 32 | cookie | | + | 33 | date | | + | 34 | etag | | + | 35 | expect | | + | 36 | expires | | + | 37 | from | | + | 38 | host | | + | 39 | if-match | | + | 40 | if-modified-since | | + | 41 | if-none-match | | + | 42 | if-range | | + | 43 | if-unmodified-since | | + | 44 | last-modified | | + | 45 | link | | + | 46 | location | | + | 47 | max-forwards | | + | 48 | proxy-authenticate | | + | 49 | proxy-authorization | | + | 50 | range | | + | 51 | referer | | + | 52 | refresh | | + | 53 | retry-after | | + | 54 | server | | + | 55 | set-cookie | | + | 56 | strict-transport-security | | + | 57 | transfer-encoding | | + | 58 | user-agent | | + | 59 | vary | | + | 60 | via | | + | 61 | www-authenticate | | + +-------+-----------------------------+---------------+ +` + bs := bufio.NewScanner(strings.NewReader(fromSpec)) + re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`) + for bs.Scan() { + l := bs.Text() + if !strings.Contains(l, "|") { + continue + } + m := re.FindStringSubmatch(l) + if m == nil { + continue + } + i, err := strconv.Atoi(m[1]) + if err != nil { + t.Errorf("Bogus integer on line %q", l) + continue + } + if i < 1 || i > staticTable.len() { + t.Errorf("Bogus index %d on line %q", i, l) + continue + } + if got, want := staticTable.ents[i-1].Name, m[2]; got != want { + t.Errorf("header index %d name = %q; want %q", i, got, want) + } + if got, want := staticTable.ents[i-1].Value, m[3]; got != want { + t.Errorf("header index %d value = %q; want %q", i, got, want) + } + } + if err := bs.Err(); err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 000000000..71db28a87 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,391 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. 
+// +// See https://http2.golang.org/ for a test server running this code. +// +package http2 // import "golang.org/x/net/http2" + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + + "golang.org/x/net/lex/httplex" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. +// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. 
+func (s Setting) Valid() error { + // Limits and error codes from 6.5.2 Defined SETTINGS Parameters + switch s.ID { + case SettingEnablePush: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } + case SettingInitialWindowSize: + if s.Val > 1<<31-1 { + return ConnectionError(ErrCodeFlowControl) + } + case SettingMaxFrameSize: + if s.Val < 16384 || s.Val > 1<<24-1 { + return ConnectionError(ErrCodeProtocol) + } + } + return nil +} + +// A SettingID is an HTTP/2 setting as defined in +// http://http2.github.io/http2-spec/#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +var ( + errInvalidHeaderFieldName = errors.New("http2: invalid header field name") + errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") +) + +// validWireHeaderFieldName reports whether v is a valid header field +// name (key). See httplex.ValidHeaderName for the base rules. +// +// Further, http2 says: +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validWireHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !httplex.IsTokenRune(r) { + return false + } + if 'A' <= r && r <= 'Z' { + return false + } + } + return true +} + +var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) + +func init() { + for i := 100; i <= 999; i++ { + if v := http.StatusText(i); v != "" { + httpCodeStringCommon[i] = strconv.Itoa(i) + } + } +} + +func httpCodeString(code int) string { + if s, ok := httpCodeStringCommon[code]; ok { + return s + } + return strconv.Itoa(code) +} + +// from pkg io +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// A gate lets two goroutines coordinate their activities. +type gate chan struct{} + +func (g gate) Done() { g <- struct{}{} } +func (g gate) Wait() { <-g } + +// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). +type closeWaiter chan struct{} + +// Init makes a closeWaiter usable. +// It exists because so a closeWaiter value can be placed inside a +// larger struct and have the Mutex and Cond's memory in the same +// allocation. +func (cw *closeWaiter) Init() { + *cw = make(chan struct{}) +} + +// Close marks the closeWaiter as closed and unblocks any waiters. +func (cw closeWaiter) Close() { + close(cw) +} + +// Wait waits for the closeWaiter to become closed. +func (cw closeWaiter) Wait() { + <-cw +} + +// bufferedWriter is a buffered writer that writes to w. +// Its buffered writer is lazily allocated as needed, to minimize +// idle memory usage with many connections. 
+type bufferedWriter struct { + w io.Writer // immutable + bw *bufio.Writer // non-nil when data is buffered +} + +func newBufferedWriter(w io.Writer) *bufferedWriter { + return &bufferedWriter{w: w} +} + +// bufWriterPoolBufferSize is the size of bufio.Writer's +// buffers created using bufWriterPool. +// +// TODO: pick a less arbitrary value? this is a bit under +// (3 x typical 1500 byte MTU) at least. Other than that, +// not much thought went into it. +const bufWriterPoolBufferSize = 4 << 10 + +var bufWriterPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) + }, +} + +func (w *bufferedWriter) Available() int { + if w.bw == nil { + return bufWriterPoolBufferSize + } + return w.bw.Available() +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + if w.bw == nil { + bw := bufWriterPool.Get().(*bufio.Writer) + bw.Reset(w.w) + w.bw = bw + } + return w.bw.Write(p) +} + +func (w *bufferedWriter) Flush() error { + bw := w.bw + if bw == nil { + return nil + } + err := bw.Flush() + bw.Reset(nil) + bufWriterPool.Put(bw) + w.bw = nil + return err +} + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 7230, section 3.3. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owns, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. 
+func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} diff --git a/vendor/golang.org/x/net/http2/http2_test.go b/vendor/golang.org/x/net/http2/http2_test.go new file mode 100644 index 000000000..524877647 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2_test.go @@ -0,0 +1,199 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "errors" + "flag" + "fmt" + "net/http" + "os/exec" + "strconv" + "strings" + "testing" + + "golang.org/x/net/http2/hpack" +) + +var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.") + +func condSkipFailingTest(t *testing.T) { + if !*knownFailing { + t.Skip("Skipping known-failing test without --known_failing") + } +} + +func init() { + inTests = true + DebugGoroutines = true + flag.BoolVar(&VerboseLogs, "verboseh2", VerboseLogs, "Verbose HTTP/2 debug logging") +} + +func TestSettingString(t *testing.T) { + tests := []struct { + s Setting + want string + }{ + {Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"}, + {Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"}, + } + for i, tt := range tests { + got := fmt.Sprint(tt.s) + if got != tt.want { + t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want) + } + } +} + +type twriter struct { + t testing.TB + st *serverTester // optional +} + +func (w twriter) Write(p []byte) (n int, err error) { + if w.st != nil { + ps := string(p) + for _, phrase := range w.st.logFilter { + if strings.Contains(ps, phrase) { + return len(p), nil // no logging + } + } + } + w.t.Logf("%s", p) + return len(p), nil +} + +// like encodeHeader, but don't add implicit pseudo headers. +func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil { + t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } + } + return buf.Bytes() +} + +// Verify that curl has http2. +func requireCurl(t *testing.T) { + out, err := dockerLogs(curl(t, "--version")) + if err != nil { + t.Skipf("failed to determine curl features; skipping test") + } + if !strings.Contains(string(out), "HTTP2") { + t.Skip("curl doesn't support HTTP2; skipping test") + } +} + +func curl(t *testing.T, args ...string) (container string) { + out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output() + if err != nil { + t.Skipf("Failed to run curl in docker: %v, %s", err, out) + } + return strings.TrimSpace(string(out)) +} + +// Verify that h2load exists. +func requireH2load(t *testing.T) { + out, err := dockerLogs(h2load(t, "--version")) + if err != nil { + t.Skipf("failed to probe h2load; skipping test: %s", out) + } + if !strings.Contains(string(out), "h2load nghttp2/") { + t.Skipf("h2load not present; skipping test. 
(Output=%q)", out) + } +} + +func h2load(t *testing.T, args ...string) (container string) { + out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output() + if err != nil { + t.Skipf("Failed to run h2load in docker: %v, %s", err, out) + } + return strings.TrimSpace(string(out)) +} + +type puppetCommand struct { + fn func(w http.ResponseWriter, r *http.Request) + done chan<- bool +} + +type handlerPuppet struct { + ch chan puppetCommand +} + +func newHandlerPuppet() *handlerPuppet { + return &handlerPuppet{ + ch: make(chan puppetCommand), + } +} + +func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) { + for cmd := range p.ch { + cmd.fn(w, r) + cmd.done <- true + } +} + +func (p *handlerPuppet) done() { close(p.ch) } +func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) { + done := make(chan bool) + p.ch <- puppetCommand{fn, done} + <-done +} +func dockerLogs(container string) ([]byte, error) { + out, err := exec.Command("docker", "wait", container).CombinedOutput() + if err != nil { + return out, err + } + exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out))) + if err != nil { + return out, errors.New("unexpected exit status from docker wait") + } + out, err = exec.Command("docker", "logs", container).CombinedOutput() + exec.Command("docker", "rm", container).Run() + if err == nil && exitStatus != 0 { + err = fmt.Errorf("exit status %d: %s", exitStatus, out) + } + return out, err +} + +func kill(container string) { + exec.Command("docker", "kill", container).Run() + exec.Command("docker", "rm", container).Run() +} + +func cleanDate(res *http.Response) { + if d := res.Header["Date"]; len(d) == 1 { + d[0] = "XXX" + } +} + +func TestSorterPoolAllocs(t *testing.T) { + ss := []string{"a", "b", "c"} + h := http.Header{ + "a": nil, + "b": nil, + "c": nil, + } + sorter := new(sorter) + + if allocs := testing.AllocsPerRun(100, func() { + sorter.SortStrings(ss) + }); allocs >= 1 { + t.Logf("SortStrings allocs = %v; want <1", allocs) + } + + if allocs := testing.AllocsPerRun(5, func() { + if len(sorter.Keys(h)) != 3 { + t.Fatal("wrong result") + } + }); allocs > 0 { + t.Logf("Keys allocs = %v; want <1", allocs) + } +} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go new file mode 100644 index 000000000..508cebcc4 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go16.go @@ -0,0 +1,21 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.6 + +package http2 + +import ( + "net/http" + "time" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + return nil, errTransportVersion +} + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return 0 + +} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go new file mode 100644 index 000000000..140434a79 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go17.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.7 + +package http2 + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +type contextContext interface { + Done() <-chan struct{} + Err() error +} + +type fakeContext struct{} + +func (fakeContext) Done() <-chan struct{} { return nil } +func (fakeContext) Err() error { panic("should not be called") } + +func reqContext(r *http.Request) fakeContext { + return fakeContext{} +} + +func setResponseUncompressed(res *http.Response) { + // Nothing. +} + +type clientTrace struct{} + +func requestTrace(*http.Request) *clientTrace { return nil } +func traceGotConn(*http.Request, *ClientConn) {} +func traceFirstResponseByte(*clientTrace) {} +func traceWroteHeaders(*clientTrace) {} +func traceWroteRequest(*clientTrace, error) {} +func traceGot100Continue(trace *clientTrace) {} +func traceWait100Continue(trace *clientTrace) {} + +func nop() {} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + return nil, nop +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return ctx, nop +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req +} + +// temporary copy of Go 1.6's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} + +func (cc *ClientConn) Ping(ctx contextContext) error { + return cc.ping(ctx) +} + +func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go new file mode 100644 index 000000000..6f8d3f86f --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go18.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package http2 + +import ( + "io" + "net/http" +) + +func configureServer18(h1 *http.Server, h2 *Server) error { + // No IdleTimeout to sync prior to Go 1.8. + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return nil +} + +func reqBodyIsNoBody(io.ReadCloser) bool { return false } + +func go18httpNoBody() io.ReadCloser { return nil } // for tests only diff --git a/vendor/golang.org/x/net/http2/not_go19.go b/vendor/golang.org/x/net/http2/not_go19.go new file mode 100644 index 000000000..5ae07726b --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go19.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.9 + +package http2 + +import ( + "net/http" +) + +func configureServer19(s *http.Server, conf *Server) error { + // not supported prior to go1.9 + return nil +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 000000000..a6140099c --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -0,0 +1,163 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading + err error // read error once empty. non-nil means closed. + breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + if p.b == nil { + return 0 + } + return p.b.Len() +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b != nil && p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + p.b = nil + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + if p.breakErr != nil { + return len(d), nil // discard when there is no reader + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. +func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. 
+ return + } + p.readFn = fn + if dst == &p.breakErr { + p.b = nil + } + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/vendor/golang.org/x/net/http2/pipe_test.go b/vendor/golang.org/x/net/http2/pipe_test.go new file mode 100644 index 000000000..1bf351ff6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe_test.go @@ -0,0 +1,130 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "testing" +) + +func TestPipeClose(t *testing.T) { + var p pipe + p.b = new(bytes.Buffer) + a := errors.New("a") + b := errors.New("b") + p.CloseWithError(a) + p.CloseWithError(b) + _, err := p.Read(make([]byte, 1)) + if err != a { + t.Errorf("err = %v want %v", err, a) + } +} + +func TestPipeDoneChan(t *testing.T) { + var p pipe + done := p.Done() + select { + case <-done: + t.Fatal("done too soon") + default: + } + p.CloseWithError(io.EOF) + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_ErrFirst(t *testing.T) { + var p pipe + p.CloseWithError(io.EOF) + done := p.Done() + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_Break(t *testing.T) { + var p pipe + done := p.Done() + select { + case <-done: + t.Fatal("done too soon") + default: + } + p.BreakWithError(io.EOF) + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_Break_ErrFirst(t *testing.T) { + var p pipe + p.BreakWithError(io.EOF) + done := p.Done() + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeCloseWithError(t *testing.T) { + p := &pipe{b: new(bytes.Buffer)} + const body = "foo" + io.WriteString(p, body) + a := errors.New("test error") + p.CloseWithError(a) + all, err := ioutil.ReadAll(p) + if string(all) != body { + t.Errorf("read bytes = %q; want %q", all, body) + } + if err != a { + t.Logf("read error = %v, %v", err, a) + } + // Read and Write should fail. 
+ if n, err := p.Write([]byte("abc")); err != errClosedPipeWrite || n != 0 { + t.Errorf("Write(abc) after close\ngot %v, %v\nwant 0, %v", n, err, errClosedPipeWrite) + } + if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 { + t.Errorf("Read() after close\ngot %v, nil\nwant 0, %v", n, errClosedPipeWrite) + } +} + +func TestPipeBreakWithError(t *testing.T) { + p := &pipe{b: new(bytes.Buffer)} + io.WriteString(p, "foo") + a := errors.New("test err") + p.BreakWithError(a) + all, err := ioutil.ReadAll(p) + if string(all) != "" { + t.Errorf("read bytes = %q; want empty string", all) + } + if err != a { + t.Logf("read error = %v, %v", err, a) + } + if p.b != nil { + t.Errorf("buffer should be nil after BreakWithError") + } + // Write should succeed silently. + if n, err := p.Write([]byte("abc")); err != nil || n != 3 { + t.Errorf("Write(abc) after break\ngot %v, %v\nwant 0, nil", n, err) + } + if p.b != nil { + t.Errorf("buffer should be nil after Write") + } + // Read should fail. + if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 { + t.Errorf("Read() after close\ngot %v, nil\nwant 0, not nil", n) + } +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go new file mode 100644 index 000000000..39ed755a8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server.go @@ -0,0 +1,2888 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: turn off the serve goroutine when idle, so +// an idle conn only has the readFrames goroutine active. (which could +// also be optimized probably to pin less memory in crypto/tls). This +// would involve tracking when the serve goroutine is active (atomic +// int32 read/CAS probably?) and starting it up when frames arrive, +// and shutting it down when all handlers exit. the occasional PING +// packets could use time.AfterFunc to call sc.wakeStartServeLoop() +// (which is a no-op if already running) and then queue the PING write +// as normal. The serve loop would then exit in most cases (if no +// Handlers running) and not be woken up again until the PING packet +// returns. + +// TODO (maybe): add a mechanism for Handlers to going into +// half-closed-local mode (rw.(io.Closer) test?) but not exit their +// handler, and continue to be able to read from the +// Request.Body. This would be a somewhat semantic change from HTTP/1 +// (or at least what we expose in net/http), so I'd probably want to +// add it there too. For now, this package says that returning from +// the Handler ServeHTTP function means you're both done reading and +// done writing, without a way to stop just one or the other. + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "math" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" +) + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? 
+)
+
+var (
+	errClientDisconnected = errors.New("client disconnected")
+	errClosedBody         = errors.New("body closed by handler")
+	errHandlerComplete    = errors.New("http2: request body closed due to handler exiting")
+	errStreamClosed       = errors.New("http2: stream closed")
+)
+
+var responseWriterStatePool = sync.Pool{
+	New: func() interface{} {
+		rws := &responseWriterState{}
+		rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
+		return rws
+	},
+}
+
+// Test hooks.
+var (
+	testHookOnConn        func()
+	testHookGetServerConn func(*serverConn)
+	testHookOnPanicMu     *sync.Mutex // nil except in tests
+	testHookOnPanic       func(sc *serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type Server struct {
+	// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+	// which may run at a time over all connections.
+	// Negative or zero means no limit.
+	// TODO: implement
+	MaxHandlers int
+
+	// MaxConcurrentStreams optionally specifies the number of
+	// concurrent streams that each client may have open at a
+	// time. This is unrelated to the number of http.Handler goroutines
+	// which may be active globally, which is MaxHandlers.
+	// If zero, MaxConcurrentStreams defaults to at least 100, per
+	// the HTTP/2 spec's recommendations.
+	MaxConcurrentStreams uint32
+
+	// MaxReadFrameSize optionally specifies the largest frame
+	// this server is willing to read. A valid value is between
+	// 16k and 16M, inclusive. If zero or otherwise invalid, a
+	// default value is used.
+	MaxReadFrameSize uint32
+
+	// PermitProhibitedCipherSuites, if true, permits the use of
+	// cipher suites prohibited by the HTTP/2 spec.
+	PermitProhibitedCipherSuites bool
+
+	// IdleTimeout specifies how long until idle clients should be
+	// closed with a GOAWAY frame. PING frames are not considered
+	// activity for the purposes of IdleTimeout.
+	IdleTimeout time.Duration
+
+	// MaxUploadBufferPerConnection is the size of the initial flow
+	// control window for each connection. The HTTP/2 spec does not
+	// allow this to be smaller than 65535 or larger than 2^32-1.
+	// If the value is outside this range, a default value will be
+	// used instead.
+	MaxUploadBufferPerConnection int32
+
+	// MaxUploadBufferPerStream is the size of the initial flow control
+	// window for each stream. The HTTP/2 spec does not allow this to
+	// be larger than 2^32-1. If the value is zero or larger than the
+	// maximum, a default value will be used instead.
+	MaxUploadBufferPerStream int32
+
+	// NewWriteScheduler constructs a write scheduler for a connection.
+	// If nil, a default scheduler is chosen.
+	NewWriteScheduler func() WriteScheduler
+
+	// Internal state. This is a pointer (rather than embedded directly)
+	// so that we don't embed a Mutex in this struct, which would make the
+	// struct non-copyable, which might break some callers.
+ state *serverInternalState +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 +} + +func (s *Server) maxReadFrameSize() uint32 { + if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { + return v + } + return defaultMaxReadFrameSize +} + +func (s *Server) maxConcurrentStreams() uint32 { + if v := s.MaxConcurrentStreams; v > 0 { + return v + } + return defaultMaxStreams +} + +type serverInternalState struct { + mu sync.Mutex + activeConns map[*serverConn]struct{} +} + +func (s *serverInternalState) registerConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + s.activeConns[sc] = struct{}{} + s.mu.Unlock() +} + +func (s *serverInternalState) unregisterConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + delete(s.activeConns, sc) + s.mu.Unlock() +} + +func (s *serverInternalState) startGracefulShutdown() { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + for sc := range s.activeConns { + sc.startGracefulShutdown() + } + s.mu.Unlock() +} + +// ConfigureServer adds HTTP/2 support to a net/http Server. +// +// The configuration conf may be nil. +// +// ConfigureServer must be called before s begins serving. +func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } + if conf == nil { + conf = new(Server) + } + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + if err := configureServer18(s, conf); err != nil { + return err + } + if err := configureServer19(s, conf); err != nil { + return err + } + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + switch cs { + case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. 
+ + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. +func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. 
+ // Disarm the net.Conn write deadline here. + if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() + } + + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. + if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. + } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. + sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. 
+ sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + baseCtx contextContext + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + writeSched WriteScheduler + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? + clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder + + // Used by startGracefulShutdown. + shutdownOnce sync.Once +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. 
Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field. +type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx contextContext + cancelCtx func() + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://tools.ietf.org/html/rfc7540#section-5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. 
+func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. + str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. +func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr, err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. 
+ if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(FrameWriteRequest{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + }, + }) + sc.unackedSettings++ + + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. + sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + defer sc.idleTimer.Stop() + } + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + + loopNum := 0 + for { + loopNum++ + select { + case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } + sc.writeFrame(wr) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer != nil { + settingsTimer.Stop() + settingsTimer = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } + } + + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY + // with no error code (graceful shutdown), don't start the timer until + // all open streams have been completed. 
+ sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame + gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 + if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { + sc.shutDownIn(goAwayTimeout) + } + } +} + +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. +var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: + } +} + +var errPrefaceTimeout = errors.New("timeout waiting for client preface") + +// readPreface reads the ClientPreface greeting from the peer or +// returns errPrefaceTimeout on timeout, or an error if the greeting +// is invalid. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errPrefaceTimeout + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. 
+// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wr: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { + sc.serveG.check() + + // If true, wr will not be written and wr.done will not be signaled. + var ignoreWrite bool + + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. + // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. + if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wr.write.(type) { + case *writeResHeaders: + wr.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. + if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } + ignoreWrite = true + } + } + + if !ignoreWrite { + sc.writeSched.Push(wr) + } + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wr (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wr. +func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wr.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. 
(We never send PRIORITY from the server, so that is not checked.) + default: + panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) + } + case stateClosed: + panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) + } + } + if wpp, ok := wr.write.(*writePushPromise); ok { + var err error + wpp.promisedID, err = wpp.allocatePromisedID() + if err != nil { + sc.writingFrameAsync = false + wr.replyToWriter(err) + return + } + } + + sc.writingFrame = true + sc.needsFrameFlush = true + if wr.write.staysWithinBuffer(sc.bw.Available()) { + sc.writingFrameAsync = false + err := wr.write.writeFrame(sc) + sc.wroteFrame(frameWriteResult{wr, err}) + } else { + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr) + } +} + +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in the +// the main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + +// wroteFrame is called on the serve goroutine with the result of +// whatever happened on writeFrameAsync. +func (sc *serverConn) wroteFrame(res frameWriteResult) { + sc.serveG.check() + if !sc.writingFrame { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + sc.writingFrameAsync = false + + wr := res.wr + + if writeEndsStream(wr.write) { + st := wr.stream + if st == nil { + panic("internal error: expecting non-nil stream") + } + switch st.state { + case stateOpen: + // Here we would go to stateHalfClosedLocal in + // theory, but since our handler is done and + // the net/http package provides no mechanism + // for closing a ResponseWriter while still + // reading data (see possible TODO at top of + // this file), we go into closed state here + // anyway, after telling the peer we're + // hanging up on them. We'll transition to + // stateClosed after the RST_STREAM frame is + // written. + st.state = stateHalfClosedLocal + // Section 8.1: a server MAY request that the client abort + // transmission of a request without error by sending a + // RST_STREAM with an error code of NO_ERROR after sending + // a complete response. + sc.resetStream(streamError(st.id, ErrCodeNo)) + case stateHalfClosedRemote: + sc.closeStream(st, errHandlerComplete) + } + } else { + switch v := wr.write.(type) { + case StreamError: + // st may be unknown if the RST_STREAM was generated to reject bad input. + if st, ok := sc.streams[v.StreamID]; ok { + sc.closeStream(st, v) + } + case handlerPanicRST: + sc.closeStream(wr.stream, errHandlerPanicked) + } + } + + // Reply (if requested) to unblock the ServeHTTP goroutine. + wr.replyToWriter(res.err) + + sc.scheduleFrameWrite() +} + +// scheduleFrameWrite tickles the frame writing scheduler. +// +// If a frame is already being written, nothing happens. This will be called again +// when the frame is done being written. +// +// If a frame isn't being written we need to send one, the best frame +// to send is selected, preferring first things that aren't +// stream-specific (e.g. ACKing settings), and then finding the +// highest priority stream. +// +// If a frame isn't being written and there's nothing else to send, we +// flush the write buffer. 
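The scheduler prefers connection-level work before stream frames: a pending GOAWAY, then a SETTINGS ACK, then frames popped from the write scheduler, and finally a flush when nothing else is queued. A toy model of that selection order, using invented names rather than the real serverConn state:

```go
package main

import "fmt"

// pendingWork is a toy model of the state scheduleFrameWrite consults.
// The field names are invented for illustration; they are not the real
// serverConn fields.
type pendingWork struct {
	needGoAway      bool
	needSettingsAck bool
	queuedFrames    []string // stands in for the write scheduler
	needsFlush      bool
}

// nextWrite mirrors the preference order: a pending GOAWAY, then a
// SETTINGS ACK, then scheduled stream frames, then a flush.
func nextWrite(w *pendingWork) (string, bool) {
	switch {
	case w.needGoAway:
		w.needGoAway = false
		return "GOAWAY", true
	case w.needSettingsAck:
		w.needSettingsAck = false
		return "SETTINGS ACK", true
	case len(w.queuedFrames) > 0:
		f := w.queuedFrames[0]
		w.queuedFrames = w.queuedFrames[1:]
		return f, true
	case w.needsFlush:
		w.needsFlush = false
		return "FLUSH", true
	}
	return "", false
}

func main() {
	w := &pendingWork{
		needSettingsAck: true,
		queuedFrames:    []string{"HEADERS", "DATA"},
		needsFlush:      true,
	}
	for f, ok := nextWrite(w); ok; f, ok = nextWrite(w) {
		fmt.Println(f) // SETTINGS ACK, HEADERS, DATA, FLUSH
	}
}
```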
+func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame || sc.inFrameScheduleLoop { + return + } + sc.inFrameScheduleLoop = true + for !sc.writingFrameAsync { + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(FrameWriteRequest{ + write: &writeGoAway{ + maxStreamID: sc.maxClientStreamID, + code: sc.goAwayCode, + }, + }) + continue + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) + continue + } + if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { + if wr, ok := sc.writeSched.Pop(); ok { + sc.startFrameWrite(wr) + continue + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + continue + } + break + } + sc.inFrameScheduleLoop = false +} + +// startGracefulShutdown gracefully shuts down a connection. This +// sends GOAWAY with ErrCodeNo to tell the client we're gracefully +// shutting down. The connection isn't closed until all current +// streams are done. +// +// startGracefulShutdown returns immediately; it does not wait until +// the connection has shut down. +func (sc *serverConn) startGracefulShutdown() { + sc.serveG.checkNotOn() // NOT + sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) +} + +// After sending GOAWAY, the connection will close after goAwayTimeout. +// If we close the connection immediately after sending GOAWAY, there may +// be unsent data in our kernel receive buffer, which will cause the kernel +// to send a TCP RST on close() instead of a FIN. This RST will abort the +// connection immediately, whether or not the client had received the GOAWAY. +// +// Ideally we should delay for at least 1 RTT + epsilon so the client has +// a chance to read the GOAWAY and stop sending messages. Measuring RTT +// is hard, so we approximate with 1 second. See golang.org/issue/18701. +// +// This is a var so it can be shorter in tests, where all requests uses the +// loopback interface making the expected RTT very small. +// +// TODO: configurable? +var goAwayTimeout = 1 * time.Second + +func (sc *serverConn) startGracefulShutdownInternal() { + sc.goAway(ErrCodeNo) +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + if sc.inGoAway { + return + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(FrameWriteRequest{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.resetQueued = true + } +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. +func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { + sc.serveG.check() + err := res.err + if err != nil { + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g. 
CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. + return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) + case *PushPromiseFrame: + // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." 
+ return nil + } + if !st.flow.add(int32(f.Increment)) { + return streamError(f.StreamID, ErrCodeFlowControl) + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.cancelCtx() + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- + } + delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdownInternal() + } + } + if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. + // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.CloseStream(st.id) +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. 
When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." + return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + data := f.Data() + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + return streamError(id, ErrCodeStreamClosed) + } + if f.Length > 0 { + // Check whether the client has flow control quota. + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + + // Return any padded flow control now, since we won't + // refund it later on body reads. 
+ if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) + } + } + if f.StreamEnded() { + st.endStream() + } + return nil +} + +func (sc *serverConn) processGoAway(f *GoAwayFrame) error { + sc.serveG.check() + if f.ErrCode != ErrCodeNo { + sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } else { + sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } + sc.startGracefulShutdownInternal() + // http://tools.ietf.org/html/rfc7540#section-6.8 + // We should not create any new streams, which means we should disable push. + sc.pushEnabled = false + return nil +} + +// isPushed reports whether the stream is server-initiated. +func (st *stream) isPushed() bool { + return st.id%2 == 0 +} + +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). +func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. +func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + sc.serveG.check() + id := f.StreamID + if sc.inGoAway { + // Ignore. + return nil + } + // http://tools.ietf.org/html/rfc7540#section-5.1.1 + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { + return ConnectionError(ErrCodeProtocol) + } + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + if st := sc.streams[f.StreamID]; st != nil { + if st.resetQueued { + // We're sending RST_STREAM to close the stream, so don't bother + // processing this frame. + return nil + } + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + if id <= sc.maxClientStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxClientStreamID = id + + if sc.idleTimer != nil { + sc.idleTimer.Stop() + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.2 + // [...] Endpoints MUST NOT exceed the limit set by their peer. 
An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { + if sc.unackedSettings == 0 { + // They should know better. + return streamError(id, ErrCodeProtocol) + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return streamError(id, ErrCodeRefusedStream) + } + + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { + handler = new400Handler(err) + } + + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return streamError(st.id, ErrCodeProtocol) + } + + if len(f.PseudoFields()) > 0 { + return streamError(st.id, ErrCodeProtocol) + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return streamError(st.id, ErrCodeProtocol) + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. 
+ return streamError(streamID, ErrCodeProtocol) + } + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) + return nil +} + +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") + } + + ctx, cancelCtx := contextWithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, + } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } + + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ + } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) + } + + return st +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), + } + + isConnect := rp.method == "CONNECT" + if isConnect { + if rp.path != "" || rp.scheme != "" || rp.authority == "" { + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." 
+ // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + bodyOpen := !f.StreamEnded() + if rp.method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = rp.header.Get("Host") + } + + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err + } + if bodyOpen { + if vv, ok := rp.header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + var tlsState *tls.ConnectionState // nil if not scheme https + if rp.scheme == "https" { + tlsState = sc.tlsState + } + + needsContinue := rp.header.Get("Expect") == "100-continue" + if needsContinue { + rp.header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.header["Cookie"]; len(cookies) > 1 { + rp.header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range rp.header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(rp.header, "Trailer") + + var url_ *url.URL + var requestURI string + if rp.method == "CONNECT" { + url_ = &url.URL{Host: rp.authority} + requestURI = rp.authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.path) + if err != nil { + return nil, nil, streamError(st.id, ErrCodeProtocol) + } + requestURI = rp.path + } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + req := &http.Request{ + Method: rp.method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: rp.header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: rp.authority, + Body: body, + Trailer: trailer, + } + req = requestWithContext(req, st.ctx) + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +// Run on its own goroutine. 
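For orientation, the request construction here boils down to mapping the :method, :scheme, :authority and :path pseudo-headers onto an *http.Request. A rough standalone sketch under that assumption; the helper name and hard-coded values are illustrative:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// requestFromPseudoHeaders builds an *http.Request roughly the way the
// server code above does: RequestURI comes from :path, Host from
// :authority, and the protocol is pinned to HTTP/2.0. The sketch
// ignores :scheme, which the real code only consults to attach TLS state.
func requestFromPseudoHeaders(method, scheme, authority, path string, hdr http.Header) (*http.Request, error) {
	u, err := url.ParseRequestURI(path)
	if err != nil {
		return nil, err
	}
	return &http.Request{
		Method:     method,
		URL:        u,
		Header:     hdr,
		RequestURI: path,
		Proto:      "HTTP/2.0",
		ProtoMajor: 2,
		ProtoMinor: 0,
		Host:       authority,
	}, nil
}

func main() {
	req, err := requestFromPseudoHeaders("GET", "https", "example.com", "/index.html", http.Header{})
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.Proto, req.Host, req.URL.Path)
}
```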
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + rw.rws.stream.cancelCtx() + if didPanic { + e := recover() + sc.writeFrameFromHandler(FrameWriteRequest{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + // Same as net/http: + if shouldLogPanic(e) { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + } + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "

    <h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>

    ") +} + +// called from handler goroutines. +// h may be nil. +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { + sc.serveG.checkNotOn() // NOT on + var errc chan error + if headerData.h != nil { + // If there's a header map (which we don't own), so we have to block on + // waiting for this frame to be written, so an http.Flush mid-handler + // writes out the correct value of keys, before a handler later potentially + // mutates it. + errc = errChanPool.Get().(chan error) + } + if err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: headerData, + stream: st, + done: errc, + }); err != nil { + return err + } + if errc != nil { + select { + case err := <-errc: + errChanPool.Put(errc) + return err + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + } + } + return nil +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(FrameWriteRequest{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { + sc.serveG.checkNotOn() // NOT on + if n > 0 { + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } + } +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. + const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(FrameWriteRequest{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. 
+type requestBody struct { + stream *stream + conn *serverConn + closed bool // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil || b.sawEOF { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if err == io.EOF { + b.sawEOF = true + } + if b.conn == nil && inTests { + return + } + b.conn.noteBodyReadFromHandler(b.stream, n, err) + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + if !ValidTrailerHeader(k) { + // Forbidden by RFC 7230, section 4.1.2. + rws.conn.logf("ignoring invalid trailer %q", k) + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. 
+// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. +func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: rws.status, + h: rws.snapHeader, + endStream: endStream, + contentType: ctype, + contentLength: clen, + date: date, + }) + if err != nil { + rws.dirty = true + return 0, err + } + if endStream { + return 0, nil + } + } + if isHeadResp { + return len(p), nil + } + if len(p) == 0 && !rws.handlerDone { + return 0, nil + } + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + endStream := rws.handlerDone && !rws.hasTrailers() + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. + if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true + return 0, err + } + } + + if rws.handlerDone && rws.hasTrailers() { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + if err != nil { + rws.dirty = true + } + return len(p), err + } + return len(p), nil +} + +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. 
Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 7230 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. That worked for a while, until we found the first major +// user of Trailers in the wild: gRPC (using them only over http2), +// and gRPC libraries permit setting trailers mid-stream without +// predeclarnig them. So: change of plans. We still permit the old +// way, but we also permit this hack: if a Header() key begins with +// "Trailer:", the suffix of that key is a Trailer. Because ':' is an +// invalid token byte anyway, there is no ambiguity. (And it's already +// filtered out) It's mildly hacky, but not terrible. +// +// This method runs after the Handler is done and promotes any Header +// fields to be trailers. +func (rws *responseWriterState) promoteUndeclaredTrailers() { + for k, vv := range rws.handlerHeader { + if !strings.HasPrefix(k, TrailerPrefix) { + continue + } + trailerKey := strings.TrimPrefix(k, TrailerPrefix) + rws.declareTrailer(trailerKey) + rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv + } + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } +} + +func (w *responseWriter) Flush() { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.bw.Buffered() > 0 { + if err := rws.bw.Flush(); err != nil { + // Ignore the error. The frame writer already knows. + return + } + } else { + // The bufio.Writer won't call chunkWriter.Write + // (writeChunk with zero bytes, so we have to do it + // ourselves to force the HTTP response header and/or + // final DATA frame (with END_STREAM) to be sent. + rws.writeChunk(nil) + } +} + +func (w *responseWriter) CloseNotify() <-chan bool { + rws := w.rws + if rws == nil { + panic("CloseNotify called after Handler finished") + } + rws.closeNotifierMu.Lock() + ch := rws.closeNotifierCh + if ch == nil { + ch = make(chan bool, 1) + rws.closeNotifierCh = ch + cw := rws.stream.cw + go func() { + cw.Wait() // wait for close + ch <- true + }() + } + rws.closeNotifierMu.Unlock() + return ch +} + +func (w *responseWriter) Header() http.Header { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.handlerHeader == nil { + rws.handlerHeader = make(http.Header) + } + return rws.handlerHeader +} + +// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode. +func checkWriteHeaderCode(code int) { + // Issue 22880: require valid WriteHeader status codes. + // For now we only enforce that it's three digits. + // In the future we might block things over 599 (600 and above aren't defined + // at http://httpwg.org/specs/rfc7231.html#status.codes) + // and we might block under 200 (once we have more mature 1xx support). + // But for now any three digits. + // + // We used to send "HTTP/1.1 000 0" on the wire in responses but there's + // no equivalent bogus thing we can realistically send in HTTP/2, + // so we'll consistently panic instead and help people find their bugs + // early. 
(We can't return an error from WriteHeader even if we wanted to.) + if code < 100 || code > 999 { + panic(fmt.Sprintf("invalid WriteHeader code %v", code)) + } +} + +func (w *responseWriter) WriteHeader(code int) { + rws := w.rws + if rws == nil { + panic("WriteHeader called after Handler finished") + } + rws.writeHeader(code) +} + +func (rws *responseWriterState) writeHeader(code int) { + if !rws.wroteHeader { + checkWriteHeaderCode(code) + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) + } + } +} + +func cloneHeader(h http.Header) http.Header { + h2 := make(http.Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +// The Life Of A Write is like this: +// +// * Handler calls w.Write or w.WriteString -> +// * -> rws.bw (*bufio.Writer) -> +// * (Handler might call Flush) +// * -> chunkWriter{rws} +// * -> responseWriterState.writeChunk(p []byte) +// * -> responseWriterState.writeChunk (most of the magic; see comment there) +func (w *responseWriter) Write(p []byte) (n int, err error) { + return w.write(len(p), p, "") +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + return w.write(len(s), nil, s) +} + +// either dataB or dataS is non-zero. +func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + dirty := rws.dirty + rws.handlerDone = true + w.Flush() + w.rws = nil + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } +} + +// Push errors. +var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +// pushOptions is the internal version of http.PushOptions, which we +// cannot include here because it's only defined in Go 1.8 and later. +type pushOptions struct { + Method string + Header http.Header +} + +func (w *responseWriter) push(target string, opts pushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." + // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. 
+ u, err := url.Parse(target) + if err != nil { + return err + } + if u.Scheme == "" { + if !strings.HasPrefix(target, "/") { + return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) + } + u.Scheme = wantScheme + u.Host = w.rws.req.Host + } else { + if u.Scheme != wantScheme { + return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) + } + if u.Host == "" { + return errors.New("URL must have a host") + } + } + for k := range opts.Header { + if strings.HasPrefix(k, ":") { + return fmt.Errorf("promised request headers cannot include pseudo header %q", k) + } + // These headers are meaningful only if the request has a body, + // but PUSH_PROMISE requests cannot have a body. + // http://tools.ietf.org/html/rfc7540#section-8.2 + // Also disallow Host, since the promised URL must be absolute. + switch strings.ToLower(k) { + case "content-length", "content-encoding", "trailer", "te", "expect", "host": + return fmt.Errorf("promised request headers cannot include %q", k) + } + } + if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { + return err + } + + // The RFC effectively limits promised requests to GET and HEAD: + // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" + // http://tools.ietf.org/html/rfc7540#section-8.2 + if opts.Method != "GET" && opts.Method != "HEAD" { + return fmt.Errorf("method %q must be GET or HEAD", opts.Method) + } + + msg := &startPushRequest{ + parent: st, + method: opts.Method, + url: u, + header: cloneHeader(opts.Header), + done: errChanPool.Get().(chan error), + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case sc.serveMsgCh <- msg: + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case err := <-msg.done: + errChanPool.Put(msg.done) + return err + } +} + +type startPushRequest struct { + parent *stream + method string + url *url.URL + header http.Header + done chan error +} + +func (sc *serverConn) startPush(msg *startPushRequest) { + sc.serveG.check() + + // http://tools.ietf.org/html/rfc7540#section-6.6. + // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that + // is in either the "open" or "half-closed (remote)" state. + if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { + // responseWriter.Push checks that the stream is peer-initiaed. + msg.done <- errStreamClosed + return + } + + // http://tools.ietf.org/html/rfc7540#section-6.6. + if !sc.pushEnabled { + msg.done <- http.ErrNotSupported + return + } + + // PUSH_PROMISE frames must be sent in increasing order by stream ID, so + // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE + // is written. Once the ID is allocated, we start the request handler. + allocatePromisedID := func() (uint32, error) { + sc.serveG.check() + + // Check this again, just in case. Technically, we might have received + // an updated SETTINGS by the time we got around to writing this frame. + if !sc.pushEnabled { + return 0, http.ErrNotSupported + } + // http://tools.ietf.org/html/rfc7540#section-6.5.2. + if sc.curPushedStreams+1 > sc.clientMaxStreams { + return 0, ErrPushLimitReached + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.1. + // Streams initiated by the server MUST use even-numbered identifiers. 
+ // A server that is unable to establish a new stream identifier can send a GOAWAY + // frame so that the client is forced to open a new connection for new streams. + if sc.maxPushPromiseID+2 >= 1<<31 { + sc.startGracefulShutdownInternal() + return 0, ErrPushLimitReached + } + sc.maxPushPromiseID += 2 + promisedID := sc.maxPushPromiseID + + // http://tools.ietf.org/html/rfc7540#section-8.2. + // Strictly speaking, the new stream should start in "reserved (local)", then + // transition to "half closed (remote)" after sending the initial HEADERS, but + // we start in "half closed (remote)" for simplicity. + // See further comments at the definition of stateHalfClosedRemote. + promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 7230 section 7 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) + } + } + te := h["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. 
+// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 +func ValidTrailerHeader(name string) bool { + name = http.CanonicalHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. +func h1ServerKeepAlivesDisabled(hs *http.Server) bool { + var x interface{} = hs + type I interface { + doKeepAlives() bool + } + if hs, ok := x.(I); ok { + return !hs.doKeepAlives() + } + return false +} diff --git a/vendor/golang.org/x/net/http2/server_push_test.go b/vendor/golang.org/x/net/http2/server_push_test.go new file mode 100644 index 000000000..918fd30dc --- /dev/null +++ b/vendor/golang.org/x/net/http2/server_push_test.go @@ -0,0 +1,521 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package http2 + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "sync" + "testing" + "time" +) + +func TestServer_Push_Success(t *testing.T) { + const ( + mainBody = "index page" + pushedBody = "pushed page" + userAgent = "testagent" + cookie = "testcookie" + ) + + var stURL string + checkPromisedReq := func(r *http.Request, wantMethod string, wantH http.Header) error { + if got, want := r.Method, wantMethod; got != want { + return fmt.Errorf("promised Req.Method=%q, want %q", got, want) + } + if got, want := r.Header, wantH; !reflect.DeepEqual(got, want) { + return fmt.Errorf("promised Req.Header=%q, want %q", got, want) + } + if got, want := "https://"+r.Host, stURL; got != want { + return fmt.Errorf("promised Req.Host=%q, want %q", got, want) + } + if r.Body == nil { + return fmt.Errorf("nil Body") + } + if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 { + return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err) + } + return nil + } + + errc := make(chan error, 3) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + // Push "/pushed?get" as a GET request, using an absolute URL. + opt := &http.PushOptions{ + Header: http.Header{ + "User-Agent": {userAgent}, + }, + } + if err := w.(http.Pusher).Push(stURL+"/pushed?get", opt); err != nil { + errc <- fmt.Errorf("error pushing /pushed?get: %v", err) + return + } + // Push "/pushed?head" as a HEAD request, using a path. 
+ opt = &http.PushOptions{ + Method: "HEAD", + Header: http.Header{ + "User-Agent": {userAgent}, + "Cookie": {cookie}, + }, + } + if err := w.(http.Pusher).Push("/pushed?head", opt); err != nil { + errc <- fmt.Errorf("error pushing /pushed?head: %v", err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(mainBody))) + w.WriteHeader(200) + io.WriteString(w, mainBody) + errc <- nil + + case "/pushed?get": + wantH := http.Header{} + wantH.Set("User-Agent", userAgent) + if err := checkPromisedReq(r, "GET", wantH); err != nil { + errc <- fmt.Errorf("/pushed?get: %v", err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(pushedBody))) + w.WriteHeader(200) + io.WriteString(w, pushedBody) + errc <- nil + + case "/pushed?head": + wantH := http.Header{} + wantH.Set("User-Agent", userAgent) + wantH.Set("Cookie", cookie) + if err := checkPromisedReq(r, "HEAD", wantH); err != nil { + errc <- fmt.Errorf("/pushed?head: %v", err) + return + } + w.WriteHeader(204) + errc <- nil + + default: + errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) + } + }) + stURL = st.ts.URL + + // Send one request, which should push two responses. + st.greet() + getSlash(st) + for k := 0; k < 3; k++ { + select { + case <-time.After(2 * time.Second): + t.Errorf("timeout waiting for handler %d to finish", k) + case err := <-errc: + if err != nil { + t.Fatal(err) + } + } + } + + checkPushPromise := func(f Frame, promiseID uint32, wantH [][2]string) error { + pp, ok := f.(*PushPromiseFrame) + if !ok { + return fmt.Errorf("got a %T; want *PushPromiseFrame", f) + } + if !pp.HeadersEnded() { + return fmt.Errorf("want END_HEADERS flag in PushPromiseFrame") + } + if got, want := pp.PromiseID, promiseID; got != want { + return fmt.Errorf("got PromiseID %v; want %v", got, want) + } + gotH := st.decodeHeader(pp.HeaderBlockFragment()) + if !reflect.DeepEqual(gotH, wantH) { + return fmt.Errorf("got promised headers %v; want %v", gotH, wantH) + } + return nil + } + checkHeaders := func(f Frame, wantH [][2]string) error { + hf, ok := f.(*HeadersFrame) + if !ok { + return fmt.Errorf("got a %T; want *HeadersFrame", f) + } + gotH := st.decodeHeader(hf.HeaderBlockFragment()) + if !reflect.DeepEqual(gotH, wantH) { + return fmt.Errorf("got response headers %v; want %v", gotH, wantH) + } + return nil + } + checkData := func(f Frame, wantData string) error { + df, ok := f.(*DataFrame) + if !ok { + return fmt.Errorf("got a %T; want *DataFrame", f) + } + if gotData := string(df.Data()); gotData != wantData { + return fmt.Errorf("got response data %q; want %q", gotData, wantData) + } + return nil + } + + // Stream 1 has 2 PUSH_PROMISE + HEADERS + DATA + // Stream 2 has HEADERS + DATA + // Stream 4 has HEADERS + expected := map[uint32][]func(Frame) error{ + 1: { + func(f Frame) error { + return checkPushPromise(f, 2, [][2]string{ + {":method", "GET"}, + {":scheme", "https"}, + {":authority", st.ts.Listener.Addr().String()}, + {":path", "/pushed?get"}, + {"user-agent", userAgent}, + }) + }, + func(f Frame) error { + return checkPushPromise(f, 4, [][2]string{ + {":method", "HEAD"}, + {":scheme", "https"}, + {":authority", st.ts.Listener.Addr().String()}, + {":path", "/pushed?head"}, + {"cookie", cookie}, + {"user-agent", userAgent}, + }) + }, + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "200"}, + {"content-type", "text/html"}, + {"content-length", strconv.Itoa(len(mainBody))}, + }) + }, + 
func(f Frame) error { + return checkData(f, mainBody) + }, + }, + 2: { + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "200"}, + {"content-type", "text/html"}, + {"content-length", strconv.Itoa(len(pushedBody))}, + }) + }, + func(f Frame) error { + return checkData(f, pushedBody) + }, + }, + 4: { + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "204"}, + }) + }, + }, + } + + consumed := map[uint32]int{} + for k := 0; len(expected) > 0; k++ { + f, err := st.readFrame() + if err != nil { + for id, left := range expected { + t.Errorf("stream %d: missing %d frames", id, len(left)) + } + t.Fatalf("readFrame %d: %v", k, err) + } + id := f.Header().StreamID + label := fmt.Sprintf("stream %d, frame %d", id, consumed[id]) + if len(expected[id]) == 0 { + t.Fatalf("%s: unexpected frame %#+v", label, f) + } + check := expected[id][0] + expected[id] = expected[id][1:] + if len(expected[id]) == 0 { + delete(expected, id) + } + if err := check(f); err != nil { + t.Fatalf("%s: %v", label, err) + } + consumed[id]++ + } +} + +func TestServer_Push_SuccessNoRace(t *testing.T) { + // Regression test for issue #18326. Ensure the request handler can mutate + // pushed request headers without racing with the PUSH_PROMISE write. + errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + opt := &http.PushOptions{ + Header: http.Header{"User-Agent": {"testagent"}}, + } + if err := w.(http.Pusher).Push("/pushed", opt); err != nil { + errc <- fmt.Errorf("error pushing: %v", err) + return + } + w.WriteHeader(200) + errc <- nil + + case "/pushed": + // Update request header, ensure there is no race. + r.Header.Set("User-Agent", "newagent") + r.Header.Set("Cookie", "cookie") + w.WriteHeader(200) + errc <- nil + + default: + errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) + } + }) + + // Send one request, which should push one response. + st.greet() + getSlash(st) + for k := 0; k < 2; k++ { + select { + case <-time.After(2 * time.Second): + t.Errorf("timeout waiting for handler %d to finish", k) + case err := <-errc: + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestServer_Push_RejectRecursivePush(t *testing.T) { + // Expect two requests, but might get three if there's a bug and the second push succeeds. + errc := make(chan error, 3) + handler := func(w http.ResponseWriter, r *http.Request) error { + baseURL := "https://" + r.Host + switch r.URL.Path { + case "/": + if err := w.(http.Pusher).Push(baseURL+"/push1", nil); err != nil { + return fmt.Errorf("first Push()=%v, want nil", err) + } + return nil + + case "/push1": + if got, want := w.(http.Pusher).Push(baseURL+"/push2", nil), ErrRecursivePush; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + + default: + return fmt.Errorf("unexpected path: %q", r.URL.Path) + } + } + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + errc <- handler(w, r) + }) + defer st.Close() + st.greet() + getSlash(st) + if err := <-errc; err != nil { + t.Errorf("First request failed: %v", err) + } + if err := <-errc; err != nil { + t.Errorf("Second request failed: %v", err) + } +} + +func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, *http.Request) error, settings ...Setting) { + // Expect one request, but might get two if there's a bug and the push succeeds. 
+ errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + errc <- doPush(w.(http.Pusher), r) + }) + defer st.Close() + st.greet() + if err := st.fr.WriteSettings(settings...); err != nil { + st.t.Fatalf("WriteSettings: %v", err) + } + st.wantSettingsAck() + getSlash(st) + if err := <-errc; err != nil { + t.Error(err) + } + // Should not get a PUSH_PROMISE frame. + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Error("stream should end after headers") + } +} + +func TestServer_Push_RejectIfDisabled(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if got, want := p.Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + }, + Setting{SettingEnablePush, 0}) +} + +func TestServer_Push_RejectWhenNoConcurrentStreams(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if got, want := p.Push("https://"+r.Host+"/pushed", nil), ErrPushLimitReached; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + }, + Setting{SettingMaxConcurrentStreams, 0}) +} + +func TestServer_Push_RejectWrongScheme(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("http://"+r.Host+"/pushed", nil); err == nil { + return errors.New("Push() should have failed (push target URL is http)") + } + return nil + }) +} + +func TestServer_Push_RejectMissingHost(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("https:pushed", nil); err == nil { + return errors.New("Push() should have failed (push target URL missing host)") + } + return nil + }) +} + +func TestServer_Push_RejectRelativePath(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("../test", nil); err == nil { + return errors.New("Push() should have failed (push target is a relative path)") + } + return nil + }) +} + +func TestServer_Push_RejectForbiddenMethod(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Method: "POST"}); err == nil { + return errors.New("Push() should have failed (cannot promise a POST)") + } + return nil + }) +} + +func TestServer_Push_RejectForbiddenHeader(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + header := http.Header{ + "Content-Length": {"10"}, + "Content-Encoding": {"gzip"}, + "Trailer": {"Foo"}, + "Te": {"trailers"}, + "Host": {"test.com"}, + ":authority": {"test.com"}, + } + if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Header: header}); err == nil { + return errors.New("Push() should have failed (forbidden headers)") + } + return nil + }) +} + +func TestServer_Push_StateTransitions(t *testing.T) { + const body = "foo" + + gotPromise := make(chan bool) + finishedPush := make(chan bool) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + if err := w.(http.Pusher).Push("/pushed", nil); err != nil { + t.Errorf("Push error: %v", err) + } + // Don't finish this request until the push finishes so we don't + // nondeterministically interleave output frames with the push. 
+ <-finishedPush + case "/pushed": + <-gotPromise + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(body))) + w.WriteHeader(200) + io.WriteString(w, body) + }) + defer st.Close() + + st.greet() + if st.stream(2) != nil { + t.Fatal("stream 2 should be empty") + } + if got, want := st.streamState(2), stateIdle; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + getSlash(st) + // After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote. + st.wantPushPromise() + if got, want := st.streamState(2), stateHalfClosedRemote; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + // We stall the HTTP handler for "/pushed" until the above check. If we don't + // stall the handler, then the handler might write HEADERS and DATA and finish + // the stream before we check st.streamState(2) -- should that happen, we'll + // see stateClosed and fail the above check. + close(gotPromise) + st.wantHeaders() + if df := st.wantData(); !df.StreamEnded() { + t.Fatal("expected END_STREAM flag on DATA") + } + if got, want := st.streamState(2), stateClosed; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + close(finishedPush) +} + +func TestServer_Push_RejectAfterGoAway(t *testing.T) { + var readyOnce sync.Once + ready := make(chan struct{}) + errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + select { + case <-ready: + case <-time.After(5 * time.Second): + errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed") + } + if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { + errc <- fmt.Errorf("Push()=%v, want %v", got, want) + } + errc <- nil + }) + defer st.Close() + st.greet() + getSlash(st) + + // Send GOAWAY and wait for it to be processed. + st.fr.WriteGoAway(1, ErrCodeNo, nil) + go func() { + for { + select { + case <-ready: + return + default: + } + st.sc.serveMsgCh <- func(loopNum int) { + if !st.sc.pushEnabled { + readyOnce.Do(func() { close(ready) }) + } + } + } + }() + if err := <-errc; err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/net/http2/server_test.go b/vendor/golang.org/x/net/http2/server_test.go new file mode 100644 index 000000000..c5d8459c1 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server_test.go @@ -0,0 +1,3725 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/http2/hpack" +) + +var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered") + +func stderrv() io.Writer { + if *stderrVerbose { + return os.Stderr + } + + return ioutil.Discard +} + +type serverTester struct { + cc net.Conn // client conn + t testing.TB + ts *httptest.Server + fr *Framer + serverLogBuf bytes.Buffer // logger for httptest.Server + logFilter []string // substrings to filter out + scMu sync.Mutex // guards sc + sc *serverConn + hpackDec *hpack.Decoder + decodedHeaders [][2]string + + // If http2debug!=2, then we capture Frame debug logs that will be written + // to t.Log after a test fails. 
The read and write logs use separate locks + // and buffers so we don't accidentally introduce synchronization between + // the read and write goroutines, which may hide data races. + frameReadLogMu sync.Mutex + frameReadLogBuf bytes.Buffer + frameWriteLogMu sync.Mutex + frameWriteLogBuf bytes.Buffer + + // writing headers: + headerBuf bytes.Buffer + hpackEnc *hpack.Encoder +} + +func init() { + testHookOnPanicMu = new(sync.Mutex) + goAwayTimeout = 25 * time.Millisecond +} + +func resetHooks() { + testHookOnPanicMu.Lock() + testHookOnPanic = nil + testHookOnPanicMu.Unlock() +} + +type serverTesterOpt string + +var optOnlyServer = serverTesterOpt("only_server") +var optQuiet = serverTesterOpt("quiet_logging") +var optFramerReuseFrames = serverTesterOpt("frame_reuse_frames") + +func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester { + resetHooks() + + ts := httptest.NewUnstartedServer(handler) + + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, + NextProtos: []string{NextProtoTLS}, + } + + var onlyServer, quiet, framerReuseFrames bool + h2server := new(Server) + for _, opt := range opts { + switch v := opt.(type) { + case func(*tls.Config): + v(tlsConfig) + case func(*httptest.Server): + v(ts) + case func(*Server): + v(h2server) + case serverTesterOpt: + switch v { + case optOnlyServer: + onlyServer = true + case optQuiet: + quiet = true + case optFramerReuseFrames: + framerReuseFrames = true + } + case func(net.Conn, http.ConnState): + ts.Config.ConnState = v + default: + t.Fatalf("unknown newServerTester option type %T", v) + } + } + + ConfigureServer(ts.Config, h2server) + + st := &serverTester{ + t: t, + ts: ts, + } + st.hpackEnc = hpack.NewEncoder(&st.headerBuf) + st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField) + + ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config + if quiet { + ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + } else { + ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), "", log.LstdFlags) + } + ts.StartTLS() + + if VerboseLogs { + t.Logf("Running test server at: %s", ts.URL) + } + testHookGetServerConn = func(v *serverConn) { + st.scMu.Lock() + defer st.scMu.Unlock() + st.sc = v + } + log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st})) + if !onlyServer { + cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig) + if err != nil { + t.Fatal(err) + } + st.cc = cc + st.fr = NewFramer(cc, cc) + if framerReuseFrames { + st.fr.SetReuseFrames() + } + if !logFrameReads && !logFrameWrites { + st.fr.debugReadLoggerf = func(m string, v ...interface{}) { + m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" + st.frameReadLogMu.Lock() + fmt.Fprintf(&st.frameReadLogBuf, m, v...) + st.frameReadLogMu.Unlock() + } + st.fr.debugWriteLoggerf = func(m string, v ...interface{}) { + m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" + st.frameWriteLogMu.Lock() + fmt.Fprintf(&st.frameWriteLogBuf, m, v...) 
+ st.frameWriteLogMu.Unlock() + } + st.fr.logReads = true + st.fr.logWrites = true + } + } + return st +} + +func (st *serverTester) closeConn() { + st.scMu.Lock() + defer st.scMu.Unlock() + st.sc.conn.Close() +} + +func (st *serverTester) addLogFilter(phrase string) { + st.logFilter = append(st.logFilter, phrase) +} + +func (st *serverTester) stream(id uint32) *stream { + ch := make(chan *stream, 1) + st.sc.serveMsgCh <- func(int) { + ch <- st.sc.streams[id] + } + return <-ch +} + +func (st *serverTester) streamState(id uint32) streamState { + ch := make(chan streamState, 1) + st.sc.serveMsgCh <- func(int) { + state, _ := st.sc.state(id) + ch <- state + } + return <-ch +} + +// loopNum reports how many times this conn's select loop has gone around. +func (st *serverTester) loopNum() int { + lastc := make(chan int, 1) + st.sc.serveMsgCh <- func(loopNum int) { + lastc <- loopNum + } + return <-lastc +} + +// awaitIdle heuristically awaits for the server conn's select loop to be idle. +// The heuristic is that the server connection's serve loop must schedule +// 50 times in a row without any channel sends or receives occurring. +func (st *serverTester) awaitIdle() { + remain := 50 + last := st.loopNum() + for remain > 0 { + n := st.loopNum() + if n == last+1 { + remain-- + } else { + remain = 50 + } + last = n + } +} + +func (st *serverTester) Close() { + if st.t.Failed() { + st.frameReadLogMu.Lock() + if st.frameReadLogBuf.Len() > 0 { + st.t.Logf("Framer read log:\n%s", st.frameReadLogBuf.String()) + } + st.frameReadLogMu.Unlock() + + st.frameWriteLogMu.Lock() + if st.frameWriteLogBuf.Len() > 0 { + st.t.Logf("Framer write log:\n%s", st.frameWriteLogBuf.String()) + } + st.frameWriteLogMu.Unlock() + + // If we failed already (and are likely in a Fatal, + // unwindowing), force close the connection, so the + // httptest.Server doesn't wait forever for the conn + // to close. + if st.cc != nil { + st.cc.Close() + } + } + st.ts.Close() + if st.cc != nil { + st.cc.Close() + } + log.SetOutput(os.Stderr) +} + +// greet initiates the client's HTTP/2 connection into a state where +// frames may be sent. +func (st *serverTester) greet() { + st.greetAndCheckSettings(func(Setting) error { return nil }) +} + +func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) { + st.writePreface() + st.writeInitialSettings() + st.wantSettings().ForeachSetting(checkSetting) + st.writeSettingsAck() + + // The initial WINDOW_UPDATE and SETTINGS ACK can come in any order. 
+ var gotSettingsAck bool + var gotWindowUpdate bool + + for i := 0; i < 2; i++ { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + switch f := f.(type) { + case *SettingsFrame: + if !f.Header().Flags.Has(FlagSettingsAck) { + st.t.Fatal("Settings Frame didn't have ACK set") + } + gotSettingsAck = true + + case *WindowUpdateFrame: + if f.FrameHeader.StreamID != 0 { + st.t.Fatalf("WindowUpdate StreamID = %d; want 0", f.FrameHeader.StreamID) + } + incr := uint32((&Server{}).initialConnRecvWindowSize() - initialWindowSize) + if f.Increment != incr { + st.t.Fatalf("WindowUpdate increment = %d; want %d", f.Increment, incr) + } + gotWindowUpdate = true + + default: + st.t.Fatalf("Wanting a settings ACK or window update, received a %T", f) + } + } + + if !gotSettingsAck { + st.t.Fatalf("Didn't get a settings ACK") + } + if !gotWindowUpdate { + st.t.Fatalf("Didn't get a window update") + } +} + +func (st *serverTester) writePreface() { + n, err := st.cc.Write(clientPreface) + if err != nil { + st.t.Fatalf("Error writing client preface: %v", err) + } + if n != len(clientPreface) { + st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface)) + } +} + +func (st *serverTester) writeInitialSettings() { + if err := st.fr.WriteSettings(); err != nil { + st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) + } +} + +func (st *serverTester) writeSettingsAck() { + if err := st.fr.WriteSettingsAck(); err != nil { + st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) + } +} + +func (st *serverTester) writeHeaders(p HeadersFrameParam) { + if err := st.fr.WriteHeaders(p); err != nil { + st.t.Fatalf("Error writing HEADERS: %v", err) + } +} + +func (st *serverTester) writePriority(id uint32, p PriorityParam) { + if err := st.fr.WritePriority(id, p); err != nil { + st.t.Fatalf("Error writing PRIORITY: %v", err) + } +} + +func (st *serverTester) encodeHeaderField(k, v string) { + err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } +} + +// encodeHeaderRaw is the magic-free version of encodeHeader. +// It takes 0 or more (k, v) pairs and encodes them. +func (st *serverTester) encodeHeaderRaw(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + st.headerBuf.Reset() + for len(headers) > 0 { + k, v := headers[0], headers[1] + st.encodeHeaderField(k, v) + headers = headers[2:] + } + return st.headerBuf.Bytes() +} + +// encodeHeader encodes headers and returns their HPACK bytes. headers +// must contain an even number of key/value pairs. There may be +// multiple pairs for keys (e.g. "cookie"). The :method, :path, and +// :scheme headers default to GET, / and https. The :authority header +// defaults to st.ts.Listener.Addr(). +func (st *serverTester) encodeHeader(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + + st.headerBuf.Reset() + defaultAuthority := st.ts.Listener.Addr().String() + + if len(headers) == 0 { + // Fast path, mostly for benchmarks, so test code doesn't pollute + // profiles when we're looking to improve server allocations. + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":scheme", "https") + st.encodeHeaderField(":authority", defaultAuthority) + st.encodeHeaderField(":path", "/") + return st.headerBuf.Bytes() + } + + if len(headers) == 2 && headers[0] == ":method" { + // Another fast path for benchmarks. 
+ st.encodeHeaderField(":method", headers[1]) + st.encodeHeaderField(":scheme", "https") + st.encodeHeaderField(":authority", defaultAuthority) + st.encodeHeaderField(":path", "/") + return st.headerBuf.Bytes() + } + + pseudoCount := map[string]int{} + keys := []string{":method", ":scheme", ":authority", ":path"} + vals := map[string][]string{ + ":method": {"GET"}, + ":scheme": {"https"}, + ":authority": {defaultAuthority}, + ":path": {"/"}, + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if _, ok := vals[k]; !ok { + keys = append(keys, k) + } + if strings.HasPrefix(k, ":") { + pseudoCount[k]++ + if pseudoCount[k] == 1 { + vals[k] = []string{v} + } else { + // Allows testing of invalid headers w/ dup pseudo fields. + vals[k] = append(vals[k], v) + } + } else { + vals[k] = append(vals[k], v) + } + } + for _, k := range keys { + for _, v := range vals[k] { + st.encodeHeaderField(k, v) + } + } + return st.headerBuf.Bytes() +} + +// bodylessReq1 writes a HEADERS frames with StreamID 1 and EndStream and EndHeaders set. +func (st *serverTester) bodylessReq1(headers ...string) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(headers...), + EndStream: true, + EndHeaders: true, + }) +} + +func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { + if err := st.fr.WriteData(streamID, endStream, data); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) { + if err := st.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func readFrameTimeout(fr *Framer, wait time.Duration) (Frame, error) { + ch := make(chan interface{}, 1) + go func() { + fr, err := fr.ReadFrame() + if err != nil { + ch <- err + } else { + ch <- fr + } + }() + t := time.NewTimer(wait) + select { + case v := <-ch: + t.Stop() + if fr, ok := v.(Frame); ok { + return fr, nil + } + return nil, v.(error) + case <-t.C: + return nil, errors.New("timeout waiting for frame") + } +} + +func (st *serverTester) readFrame() (Frame, error) { + return readFrameTimeout(st.fr, 2*time.Second) +} + +func (st *serverTester) wantHeaders() *HeadersFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a HEADERS frame: %v", err) + } + hf, ok := f.(*HeadersFrame) + if !ok { + st.t.Fatalf("got a %T; want *HeadersFrame", f) + } + return hf +} + +func (st *serverTester) wantContinuation() *ContinuationFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err) + } + cf, ok := f.(*ContinuationFrame) + if !ok { + st.t.Fatalf("got a %T; want *ContinuationFrame", f) + } + return cf +} + +func (st *serverTester) wantData() *DataFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a DATA frame: %v", err) + } + df, ok := f.(*DataFrame) + if !ok { + st.t.Fatalf("got a %T; want *DataFrame", f) + } + return df +} + +func (st *serverTester) wantSettings() *SettingsFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + st.t.Fatalf("got a %T; want *SettingsFrame", f) + } + return sf +} + +func (st *serverTester) wantPing() *PingFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a PING frame: 
%v", err) + } + pf, ok := f.(*PingFrame) + if !ok { + st.t.Fatalf("got a %T; want *PingFrame", f) + } + return pf +} + +func (st *serverTester) wantGoAway() *GoAwayFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err) + } + gf, ok := f.(*GoAwayFrame) + if !ok { + st.t.Fatalf("got a %T; want *GoAwayFrame", f) + } + return gf +} + +func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting an RSTStream frame: %v", err) + } + rs, ok := f.(*RSTStreamFrame) + if !ok { + st.t.Fatalf("got a %T; want *RSTStreamFrame", f) + } + if rs.FrameHeader.StreamID != streamID { + st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID) + } + if rs.ErrCode != errCode { + st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode) + } +} + +func (st *serverTester) wantWindowUpdate(streamID, incr uint32) { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err) + } + wu, ok := f.(*WindowUpdateFrame) + if !ok { + st.t.Fatalf("got a %T; want *WindowUpdateFrame", f) + } + if wu.FrameHeader.StreamID != streamID { + st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID) + } + if wu.Increment != incr { + st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr) + } +} + +func (st *serverTester) wantSettingsAck() { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + st.t.Fatalf("Wanting a settings ACK, received a %T", f) + } + if !sf.Header().Flags.Has(FlagSettingsAck) { + st.t.Fatal("Settings Frame didn't have ACK set") + } +} + +func (st *serverTester) wantPushPromise() *PushPromiseFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + ppf, ok := f.(*PushPromiseFrame) + if !ok { + st.t.Fatalf("Wanted PushPromise, received %T", ppf) + } + return ppf +} + +func TestServer(t *testing.T) { + gotReq := make(chan bool, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Foo", "Bar") + gotReq <- true + }) + defer st.Close() + + covers("3.5", ` + The server connection preface consists of a potentially empty + SETTINGS frame ([SETTINGS]) that MUST be the first frame the + server sends in the HTTP/2 connection. 
+ `) + + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + + select { + case <-gotReq: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for request") + } +} + +func TestServer_Request_Get(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader("foo-bar", "some-value"), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Method != "GET" { + t.Errorf("Method = %q; want GET", r.Method) + } + if r.URL.Path != "/" { + t.Errorf("URL.Path = %q; want /", r.URL.Path) + } + if r.ContentLength != 0 { + t.Errorf("ContentLength = %v; want 0", r.ContentLength) + } + if r.Close { + t.Error("Close = true; want false") + } + if !strings.Contains(r.RemoteAddr, ":") { + t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr) + } + if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 { + t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor) + } + wantHeader := http.Header{ + "Foo-Bar": []string{"some-value"}, + } + if !reflect.DeepEqual(r.Header, wantHeader) { + t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) + } + if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { + t.Errorf("Read = %d, %v; want 0, EOF", n, err) + } + }) +} + +func TestServer_Request_Get_PathSlashes(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":path", "/%2f/"), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.RequestURI != "/%2f/" { + t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI) + } + if r.URL.Path != "///" { + t.Errorf("URL.Path = %q; want ///", r.URL.Path) + } + }) +} + +// TODO: add a test with EndStream=true on the HEADERS but setting a +// Content-Length anyway. Should we just omit it and force it to +// zero? + +func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != 0 { + t.Errorf("ContentLength = %v; want 0", r.ContentLength) + } + if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { + t.Errorf("Read = %d, %v; want 0, EOF", n, err) + } + }) +} + +func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) { + testBodyContents(t, -1, "", func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, nil) // just kidding. empty body. 
+ }) +} + +func TestServer_Request_Post_Body_OneData(t *testing.T) { + const content = "Some content" + testBodyContents(t, -1, content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte(content)) + }) +} + +func TestServer_Request_Post_Body_TwoData(t *testing.T) { + const content = "Some content" + testBodyContents(t, -1, content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, false, []byte(content[:5])) + st.writeData(1, true, []byte(content[5:])) + }) +} + +func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) { + const content = "Some content" + testBodyContents(t, int64(len(content)), content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", strconv.Itoa(len(content)), + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte(content)) + }) +} + +func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) { + testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes", + func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", "3", + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte("12")) + }) +} + +func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) { + testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes", + func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", "4", + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte("12345")) + }) +} + +func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) { + testServerRequest(t, write, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != wantContentLength { + t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) + } + all, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(all) != wantBody { + t.Errorf("Read = %q; want %q", all, wantBody) + } + if err := r.Body.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + }) +} + +func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) { + testServerRequest(t, write, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != wantContentLength { + t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) + } + all, err := ioutil.ReadAll(r.Body) + if err == nil { + t.Fatalf("expected an error (%q) reading from the body. 
Successfully read %q instead.", + wantReadError, all) + } + if !strings.Contains(err.Error(), wantReadError) { + t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError) + } + if err := r.Body.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + }) +} + +// Using a Host header, instead of :authority +func TestServer_Request_Get_Host(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":authority", "", "host", host), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Host != host { + t.Errorf("Host = %q; want %q", r.Host, host) + } + }) +} + +// Using an :authority pseudo-header, instead of Host +func TestServer_Request_Get_Authority(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":authority", host), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Host != host { + t.Errorf("Host = %q; want %q", r.Host, host) + } + }) +} + +func TestServer_Request_WithContinuation(t *testing.T) { + wantHeader := http.Header{ + "Foo-One": []string{"value-one"}, + "Foo-Two": []string{"value-two"}, + "Foo-Three": []string{"value-three"}, + } + testServerRequest(t, func(st *serverTester) { + fullHeaders := st.encodeHeader( + "foo-one", "value-one", + "foo-two", "value-two", + "foo-three", "value-three", + ) + remain := fullHeaders + chunks := 0 + for len(remain) > 0 { + const maxChunkSize = 5 + chunk := remain + if len(chunk) > maxChunkSize { + chunk = chunk[:maxChunkSize] + } + remain = remain[len(chunk):] + + if chunks == 0 { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: chunk, + EndStream: true, // no DATA frames + EndHeaders: false, // we'll have continuation frames + }) + } else { + err := st.fr.WriteContinuation(1, len(remain) == 0, chunk) + if err != nil { + t.Fatal(err) + } + } + chunks++ + } + if chunks < 2 { + t.Fatal("too few chunks") + } + }, func(r *http.Request) { + if !reflect.DeepEqual(r.Header, wantHeader) { + t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) + } + }) +} + +// Concatenated cookie headers. 
("8.1.2.5 Compressing the Cookie Header Field") +func TestServer_Request_CookieConcat(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.bodylessReq1( + ":authority", host, + "cookie", "a=b", + "cookie", "c=d", + "cookie", "e=f", + ) + }, func(r *http.Request) { + const want = "a=b; c=d; e=f" + if got := r.Header.Get("Cookie"); got != want { + t.Errorf("Cookie = %q; want %q", got, want) + } + }) +} + +func TestServer_Request_Reject_CapitalHeader(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameColon(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has:colon", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameNULL(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has\x00null", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameEmpty(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldValueNewline(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\nnewline") }) +} + +func TestServer_Request_Reject_HeaderFieldValueCR(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\rcarriage") }) +} + +func TestServer_Request_Reject_HeaderFieldValueDEL(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\x7fdel") }) +} + +func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") }) +} + +func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) { + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid value" ... + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter("duplicate pseudo-header") + st.bodylessReq1(":method", "GET", ":method", "POST") + }) +} + +func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) { + // 8.1.2.3 Request Pseudo-Header Fields + // "All pseudo-header fields MUST appear in the header block + // before regular header fields. Any request or response that + // contains a pseudo-header field that appears in a header + // block after a regular header field MUST be treated as + // malformed (Section 8.1.2.6)." 
+ testRejectRequest(t, func(st *serverTester) { + st.addLogFilter("pseudo-header after regular header") + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}) + enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"}) + enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"}) + enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"}) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: buf.Bytes(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") }) +} + +func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") }) +} + +func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") }) +} + +func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter(`invalid pseudo-header ":unknown_thing"`) + st.bodylessReq1(":unknown_thing", "") + }) +} + +func testRejectRequest(t *testing.T, send func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("server request made it to handler; should've been rejected") + }) + defer st.Close() + + st.greet() + send(st) + st.wantRSTStream(1, ErrCodeProtocol) +} + +func testRejectRequestWithProtocolError(t *testing.T, send func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("server request made it to handler; should've been rejected") + }, optQuiet) + defer st.Close() + + st.greet() + send(st) + gf := st.wantGoAway() + if gf.ErrCode != ErrCodeProtocol { + t.Errorf("err code = %v; want %v", gf.ErrCode, ErrCodeProtocol) + } +} + +// Section 5.1, on idle connections: "Receiving any frame other than +// HEADERS or PRIORITY on a stream in this state MUST be treated as a +// connection error (Section 5.4.1) of type PROTOCOL_ERROR." 
+func TestRejectFrameOnIdle_WindowUpdate(t *testing.T) {
+	testRejectRequestWithProtocolError(t, func(st *serverTester) {
+		st.fr.WriteWindowUpdate(123, 456)
+	})
+}
+func TestRejectFrameOnIdle_Data(t *testing.T) {
+	testRejectRequestWithProtocolError(t, func(st *serverTester) {
+		st.fr.WriteData(123, true, nil)
+	})
+}
+func TestRejectFrameOnIdle_RSTStream(t *testing.T) {
+	testRejectRequestWithProtocolError(t, func(st *serverTester) {
+		st.fr.WriteRSTStream(123, ErrCodeCancel)
+	})
+}
+
+func TestServer_Request_Connect(t *testing.T) {
+	testServerRequest(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID: 1,
+			BlockFragment: st.encodeHeaderRaw(
+				":method", "CONNECT",
+				":authority", "example.com:123",
+			),
+			EndStream: true,
+			EndHeaders: true,
+		})
+	}, func(r *http.Request) {
+		if g, w := r.Method, "CONNECT"; g != w {
+			t.Errorf("Method = %q; want %q", g, w)
+		}
+		if g, w := r.RequestURI, "example.com:123"; g != w {
+			t.Errorf("RequestURI = %q; want %q", g, w)
+		}
+		if g, w := r.URL.Host, "example.com:123"; g != w {
+			t.Errorf("URL.Host = %q; want %q", g, w)
+		}
+	})
+}
+
+func TestServer_Request_Connect_InvalidPath(t *testing.T) {
+	testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID: 1,
+			BlockFragment: st.encodeHeaderRaw(
+				":method", "CONNECT",
+				":authority", "example.com:123",
+				":path", "/bogus",
+			),
+			EndStream: true,
+			EndHeaders: true,
+		})
+	})
+}
+
+func TestServer_Request_Connect_InvalidScheme(t *testing.T) {
+	testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID: 1,
+			BlockFragment: st.encodeHeaderRaw(
+				":method", "CONNECT",
+				":authority", "example.com:123",
+				":scheme", "https",
+			),
+			EndStream: true,
+			EndHeaders: true,
+		})
+	})
+}
+
+func TestServer_Ping(t *testing.T) {
+	st := newServerTester(t, nil)
+	defer st.Close()
+	st.greet()
+
+	// Server should ignore this one, since it has ACK set.
+	ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
+	if err := st.fr.WritePing(true, ackPingData); err != nil {
+		t.Fatal(err)
+	}
+
+	// But the server should reply to this one, since ACK is false.
+	pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
+	if err := st.fr.WritePing(false, pingData); err != nil {
+		t.Fatal(err)
+	}
+
+	pf := st.wantPing()
+	if !pf.Flags.Has(FlagPingAck) {
+		t.Error("response ping doesn't have ACK set")
+	}
+	if pf.Data != pingData {
+		t.Errorf("response ping has data %q; want %q", pf.Data, pingData)
+	}
+}
+
+func TestServer_RejectsLargeFrames(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("see golang.org/issue/13434")
+	}
+
+	st := newServerTester(t, nil)
+	defer st.Close()
+	st.greet()
+
+	// Write too large of a frame (too large by one byte)
+	// We ignore the return value because it's expected that the server
+	// will only read the first 9 bytes (the header) and then disconnect.
+	st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1))
+
+	gf := st.wantGoAway()
+	if gf.ErrCode != ErrCodeFrameSize {
+		t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize)
+	}
+	if st.serverLogBuf.Len() != 0 {
+		// Previously we spun here for a bit until the GOAWAY disconnect
+		// timer fired, logging while we wait.
+ t.Errorf("unexpected server output: %.500s\n", st.serverLogBuf.Bytes()) + } +} + +func TestServer_Handler_Sends_WindowUpdate(t *testing.T) { + puppet := newHandlerPuppet() + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + puppet.act(w, r) + }) + defer st.Close() + defer puppet.done() + + st.greet() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // data coming + EndHeaders: true, + }) + st.writeData(1, false, []byte("abcdef")) + puppet.do(readBodyHandler(t, "abc")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + puppet.do(readBodyHandler(t, "def")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + st.writeData(1, true, []byte("ghijkl")) // END_STREAM here + puppet.do(readBodyHandler(t, "ghi")) + puppet.do(readBodyHandler(t, "jkl")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM +} + +// the version of the TestServer_Handler_Sends_WindowUpdate with padding. +// See golang.org/issue/16556 +func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) { + puppet := newHandlerPuppet() + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + puppet.act(w, r) + }) + defer st.Close() + defer puppet.done() + + st.greet() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + st.writeDataPadded(1, false, []byte("abcdef"), []byte{0, 0, 0, 0}) + + // Expect to immediately get our 5 bytes of padding back for + // both the connection and stream (4 bytes of padding + 1 byte of length) + st.wantWindowUpdate(0, 5) + st.wantWindowUpdate(1, 5) + + puppet.do(readBodyHandler(t, "abc")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + puppet.do(readBodyHandler(t, "def")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) +} + +func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) { + st := newServerTester(t, nil) + defer st.Close() + st.greet() + if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil { + t.Fatal(err) + } + gf := st.wantGoAway() + if gf.ErrCode != ErrCodeFlowControl { + t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl) + } + if gf.LastStreamID != 0 { + t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0) + } +} + +func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) { + inHandler := make(chan bool) + blockHandler := make(chan bool) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + <-blockHandler + }) + defer st.Close() + defer close(blockHandler) + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + // Send a bogus window update: + if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil { + t.Fatal(err) + } + st.wantRSTStream(1, ErrCodeFlowControl) +} + +// testServerPostUnblock sends a hanging POST with unsent data to handler, +// then runs fn once in the handler, and verifies that the error returned from +// handler is acceptable. It fails if takes over 5 seconds for handler to exit. 
+func testServerPostUnblock(t *testing.T, + handler func(http.ResponseWriter, *http.Request) error, + fn func(*serverTester), + checkErr func(error), + otherHeaders ...string) { + inHandler := make(chan bool) + errc := make(chan error, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + errc <- handler(w, r) + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + fn(st) + select { + case err := <-errc: + if checkErr != nil { + checkErr(err) + } + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for Handler to return") + } +} + +func TestServer_RSTStream_Unblocks_Read(t *testing.T) { + testServerPostUnblock(t, + func(w http.ResponseWriter, r *http.Request) (err error) { + _, err = r.Body.Read(make([]byte, 1)) + return + }, + func(st *serverTester) { + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }, + func(err error) { + want := StreamError{StreamID: 0x1, Code: 0x8} + if !reflect.DeepEqual(err, want) { + t.Errorf("Read error = %v; want %v", err, want) + } + }, + ) +} + +func TestServer_RSTStream_Unblocks_Header_Write(t *testing.T) { + // Run this test a bunch, because it doesn't always + // deadlock. But with a bunch, it did. + n := 50 + if testing.Short() { + n = 5 + } + for i := 0; i < n; i++ { + testServer_RSTStream_Unblocks_Header_Write(t) + } +} + +func testServer_RSTStream_Unblocks_Header_Write(t *testing.T) { + inHandler := make(chan bool, 1) + unblockHandler := make(chan bool, 1) + headerWritten := make(chan bool, 1) + wroteRST := make(chan bool, 1) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + <-wroteRST + w.Header().Set("foo", "bar") + w.WriteHeader(200) + w.(http.Flusher).Flush() + headerWritten <- true + <-unblockHandler + }) + defer st.Close() + + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + wroteRST <- true + st.awaitIdle() + select { + case <-headerWritten: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for header write") + } + unblockHandler <- true +} + +func TestServer_DeadConn_Unblocks_Read(t *testing.T) { + testServerPostUnblock(t, + func(w http.ResponseWriter, r *http.Request) (err error) { + _, err = r.Body.Read(make([]byte, 1)) + return + }, + func(st *serverTester) { st.cc.Close() }, + func(err error) { + if err == nil { + t.Error("unexpected nil error from Request.Body.Read") + } + }, + ) +} + +var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error { + <-w.(http.CloseNotifier).CloseNotify() + return nil +} + +func TestServer_CloseNotify_After_RSTStream(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }, nil) +} + +func TestServer_CloseNotify_After_ConnClose(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil) +} + +// that CloseNotify unblocks after a stream error due to the client's +// problem that's unrelated to them explicitly canceling it (which is +// 
TestServer_CloseNotify_After_RSTStream above) +func TestServer_CloseNotify_After_StreamError(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { + // data longer than declared Content-Length => stream error + st.writeData(1, true, []byte("1234")) + }, nil, "content-length", "3") +} + +func TestServer_StateTransitions(t *testing.T) { + var st *serverTester + inHandler := make(chan bool) + writeData := make(chan bool) + leaveHandler := make(chan bool) + st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + if st.stream(1) == nil { + t.Errorf("nil stream 1 in handler") + } + if got, want := st.streamState(1), stateOpen; got != want { + t.Errorf("in handler, state is %v; want %v", got, want) + } + writeData <- true + if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF { + t.Errorf("body read = %d, %v; want 0, EOF", n, err) + } + if got, want := st.streamState(1), stateHalfClosedRemote; got != want { + t.Errorf("in handler, state is %v; want %v", got, want) + } + + <-leaveHandler + }) + st.greet() + if st.stream(1) != nil { + t.Fatal("stream 1 should be empty") + } + if got := st.streamState(1); got != stateIdle { + t.Fatalf("stream 1 should be idle; got %v", got) + } + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + <-writeData + st.writeData(1, true, nil) + + leaveHandler <- true + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("expected END_STREAM flag") + } + + if got, want := st.streamState(1), stateClosed; got != want { + t.Errorf("at end, state is %v; want %v", got, want) + } + if st.stream(1) != nil { + t.Fatal("at end, stream 1 should be gone") + } +} + +// test HEADERS w/o EndHeaders + another HEADERS (should get rejected) +func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + st.writeHeaders(HeadersFrameParam{ // Not a continuation. + StreamID: 3, // different stream. 
+ BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +// test HEADERS w/o EndHeaders + PING (should get rejected) +func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + if err := st.fr.WritePing(false, [8]byte{}); err != nil { + t.Fatal(err) + } + }) +} + +// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected) +func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { + t.Fatal(err) + } + }) +} + +// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID +func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { + t.Fatal(err) + } + }) +} + +// No HEADERS on stream 0. +func TestServer_Rejects_Headers0(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writeHeaders(HeadersFrameParam{ + StreamID: 0, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +// No CONTINUATION on stream 0. +func TestServer_Rejects_Continuation0(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil { + t.Fatal(err) + } + }) +} + +// No PRIORITY on stream 0. +func TestServer_Rejects_Priority0(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writePriority(0, PriorityParam{StreamDep: 1}) + }) +} + +// No HEADERS frame with a self-dependence. +func TestServer_Rejects_HeadersSelfDependence(t *testing.T) { + testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + Priority: PriorityParam{StreamDep: 1}, + }) + }) +} + +// No PRIORTY frame with a self-dependence. +func TestServer_Rejects_PrioritySelfDependence(t *testing.T) { + testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) { + st.fr.AllowIllegalWrites = true + st.writePriority(1, PriorityParam{StreamDep: 1}) + }) +} + +func TestServer_Rejects_PushPromise(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + pp := PushPromiseParam{ + StreamID: 1, + PromiseID: 3, + } + if err := st.fr.WritePushPromise(pp); err != nil { + t.Fatal(err) + } + }) +} + +// testServerRejectsConn tests that the server hangs up with a GOAWAY +// frame and a server close after the client does something +// deserving a CONNECTION_ERROR. 
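The connection-level rejections above all revolve around how a single HPACK header block is carried in one HEADERS frame plus zero or more CONTINUATION frames, all sharing one decoder context per connection. As background, a small standalone sketch of encoding a block with the hpack package and decoding it again; the field values are arbitrary and 4096 is just HPACK's default dynamic table size.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode a header block fragment, as a client would before splitting it
	// across a HEADERS frame and optional CONTINUATION frames.
	var block bytes.Buffer
	enc := hpack.NewEncoder(&block)
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: ":scheme", Value: "https"},
		{Name: ":authority", Value: "example.com"},
		{Name: "foo", Value: "bar"},
	} {
		if err := enc.WriteField(hf); err != nil {
			log.Fatal(err)
		}
	}

	// Decode it with a fresh decoder, much like the serverTester's hpackDec.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(block.Bytes()); err != nil {
		log.Fatal(err)
	}
	if err := dec.Close(); err != nil {
		log.Fatal(err)
	}
}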
+func testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + st.addLogFilter("connection error: PROTOCOL_ERROR") + defer st.Close() + st.greet() + writeReq(st) + + st.wantGoAway() + errc := make(chan error, 1) + go func() { + fr, err := st.fr.ReadFrame() + if err == nil { + err = fmt.Errorf("got frame of type %T", fr) + } + errc <- err + }() + select { + case err := <-errc: + if err != io.EOF { + t.Errorf("ReadFrame = %v; want io.EOF", err) + } + case <-time.After(2 * time.Second): + t.Error("timeout waiting for disconnect") + } +} + +// testServerRejectsStream tests that the server sends a RST_STREAM with the provided +// error code after a client sends a bogus request. +func testServerRejectsStream(t *testing.T, code ErrCode, writeReq func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + defer st.Close() + st.greet() + writeReq(st) + st.wantRSTStream(1, code) +} + +// testServerRequest sets up an idle HTTP/2 connection and lets you +// write a single request with writeReq, and then verify that the +// *http.Request is built correctly in checkReq. +func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) { + gotReq := make(chan bool, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if r.Body == nil { + t.Fatal("nil Body") + } + checkReq(r) + gotReq <- true + }) + defer st.Close() + + st.greet() + writeReq(st) + + select { + case <-gotReq: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for request") + } +} + +func getSlash(st *serverTester) { st.bodylessReq1() } + +func TestServer_Response_NoData(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + // Nothing. + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("want END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + }) +} + +func TestServer_Response_NoData_Header_FooBar(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Foo-Bar", "some-value") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("want END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo-bar", "some-value"}, + {"content-length", "0"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) { + const msg = "this is HTML." 
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Content-Type", "foo/bar") + io.WriteString(w, msg) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("don't want END_STREAM, expecting data") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "foo/bar"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + df := st.wantData() + if !df.StreamEnded() { + t.Error("expected DATA to have END_STREAM flag") + } + if got := string(df.Data()); got != msg { + t.Errorf("got DATA %q; want %q", got, msg) + } + }) +} + +func TestServer_Response_TransferEncoding_chunked(t *testing.T) { + const msg = "hi" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Transfer-Encoding", "chunked") // should be stripped + io.WriteString(w, msg) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +// Header accessed only after the initial write. +func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) { + const msg = "this is HTML." + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + io.WriteString(w, msg) + w.Header().Set("foo", "should be ignored") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +// Header accessed before the initial write and later mutated. +func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) { + const msg = "this is HTML." + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("foo", "proper value") + io.WriteString(w, msg) + w.Header().Set("foo", "should be ignored") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo", "proper value"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + }) +} + +func TestServer_Response_Data_SniffLenType(t *testing.T) { + const msg = "this is HTML." 
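The sniffing behaviour exercised above follows net/http's usual rules: an explicit Content-Type always wins, and otherwise the first bytes written are run through http.DetectContentType, which only reports text/html when the body actually begins with HTML markup such as an <html> tag (a bare sentence sniffs as text/plain). A hedged sketch, separate from the patch, with made-up bodies and paths:

package main

import (
	"fmt"
	"io"
	"net/http"
)

// explicitType sets Content-Type itself, so the sniffer never runs.
func explicitType(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "foo/bar")
	io.WriteString(w, "<html>this is HTML.")
}

// sniffedType lets the server derive the type from the first Write.
func sniffedType(w http.ResponseWriter, r *http.Request) {
	io.WriteString(w, "<html>this is HTML.") // sniffed as text/html; charset=utf-8
}

func main() {
	// The same sniffer the server applies to the first written bytes:
	fmt.Println(http.DetectContentType([]byte("<html>this is HTML."))) // text/html; charset=utf-8
	fmt.Println(http.DetectContentType([]byte("this is plain text")))  // text/plain; charset=utf-8

	http.Handle("/explicit", http.HandlerFunc(explicitType))
	http.Handle("/sniffed", http.HandlerFunc(sniffedType))
	// ListenAndServe / TLS wiring omitted; the two handlers are the point here.
}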
+ testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + io.WriteString(w, msg) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("don't want END_STREAM, expecting data") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", strconv.Itoa(len(msg))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + df := st.wantData() + if !df.StreamEnded() { + t.Error("expected DATA to have END_STREAM flag") + } + if got := string(df.Data()); got != msg { + t.Errorf("got DATA %q; want %q", got, msg) + } + }) +} + +func TestServer_Response_Header_Flush_MidWrite(t *testing.T) { + const msg = "this is HTML" + const msg2 = ", and this is the next chunk" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + io.WriteString(w, msg2) + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/html; charset=utf-8"}, // sniffed + // and no content-length + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + { + df := st.wantData() + if df.StreamEnded() { + t.Error("unexpected END_STREAM flag") + } + if got := string(df.Data()); got != msg { + t.Errorf("got DATA %q; want %q", got, msg) + } + } + { + df := st.wantData() + if !df.StreamEnded() { + t.Error("wanted END_STREAM flag on last data chunk") + } + if got := string(df.Data()); got != msg2 { + t.Errorf("got DATA %q; want %q", got, msg2) + } + } + }) +} + +func TestServer_Response_LargeWrite(t *testing.T) { + const size = 1 << 20 + const maxFrameSize = 16 << 10 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + n, err := w.Write(bytes.Repeat([]byte("a"), size)) + if err != nil { + return fmt.Errorf("Write error: %v", err) + } + if n != size { + return fmt.Errorf("wrong size %d from Write", n) + } + return nil + }, func(st *serverTester) { + if err := st.fr.WriteSettings( + Setting{SettingInitialWindowSize, 0}, + Setting{SettingMaxFrameSize, maxFrameSize}, + ); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + // Give the handler quota to write: + if err := st.fr.WriteWindowUpdate(1, size); err != nil { + t.Fatal(err) + } + // Give the handler quota to write to connection-level + // window as well + if err := st.fr.WriteWindowUpdate(0, size); err != nil { + t.Fatal(err) + } + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, // sniffed + // and no content-length + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + var bytes, frames int + for { + df := st.wantData() + bytes += len(df.Data()) + frames++ + for _, b := range df.Data() { + if b != 'a' { + t.Fatal("non-'a' byte seen in 
DATA") + } + } + if df.StreamEnded() { + break + } + } + if bytes != size { + t.Errorf("Got %d bytes; want %d", bytes, size) + } + if want := int(size / maxFrameSize); frames < want || frames > want*2 { + t.Errorf("Got %d frames; want %d", frames, size) + } + }) +} + +// Test that the handler can't write more than the client allows +func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) { + // Make these reads. Before each read, the client adds exactly enough + // flow-control to satisfy the read. Numbers chosen arbitrarily. + reads := []int{123, 1, 13, 127} + size := 0 + for _, n := range reads { + size += n + } + + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + n, err := w.Write(bytes.Repeat([]byte("a"), size)) + if err != nil { + return fmt.Errorf("Write error: %v", err) + } + if n != size { + return fmt.Errorf("wrong size %d from Write", n) + } + return nil + }, func(st *serverTester) { + // Set the window size to something explicit for this test. + // It's also how much initial data we expect. + if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, uint32(reads[0])}); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + df := st.wantData() + if got := len(df.Data()); got != reads[0] { + t.Fatalf("Initial window size = %d but got DATA with %d bytes", reads[0], got) + } + + for _, quota := range reads[1:] { + if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil { + t.Fatal(err) + } + df := st.wantData() + if int(quota) != len(df.Data()) { + t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota) + } + } + }) +} + +// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM. 
+func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) { + const size = 1 << 20 + const maxFrameSize = 16 << 10 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + errc := make(chan error, 1) + go func() { + _, err := w.Write(bytes.Repeat([]byte("a"), size)) + errc <- err + }() + select { + case err := <-errc: + if err == nil { + return errors.New("unexpected nil error from Write in handler") + } + return nil + case <-time.After(2 * time.Second): + return errors.New("timeout waiting for Write in handler") + } + }, func(st *serverTester) { + if err := st.fr.WriteSettings( + Setting{SettingInitialWindowSize, 0}, + Setting{SettingMaxFrameSize, maxFrameSize}, + ); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }) +} + +func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + // Nothing; send empty DATA + return nil + }, func(st *serverTester) { + // Handler gets no data quota: + if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + df := st.wantData() + if got := len(df.Data()); got != 0 { + t.Fatalf("unexpected %d DATA bytes; want 0", got) + } + if !df.StreamEnded() { + t.Fatal("DATA didn't have END_STREAM") + } + }) +} + +func TestServer_Response_Automatic100Continue(t *testing.T) { + const msg = "foo" + const reply = "bar" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + if v := r.Header.Get("Expect"); v != "" { + t.Errorf("Expect header = %q; want empty", v) + } + buf := make([]byte, len(msg)) + // This read should trigger the 100-continue being sent. + if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg { + return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg) + } + _, err := io.WriteString(w, reply) + return err + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"), + EndStream: false, + EndHeaders: true, + }) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "100"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Fatalf("Got headers %v; want %v", goth, wanth) + } + + // Okay, they sent status 100, so we can send our + // gigantic and/or sensitive "foo" payload now. 
+ st.writeData(1, true, []byte(msg)) + + st.wantWindowUpdate(0, uint32(len(msg))) + + hf = st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("expected data to follow") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth = st.decodeHeader(hf.HeaderBlockFragment()) + wanth = [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", strconv.Itoa(len(reply))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + + df := st.wantData() + if string(df.Data()) != reply { + t.Errorf("Client read %q; want %q", df.Data(), reply) + } + if !df.StreamEnded() { + t.Errorf("expect data stream end") + } + }) +} + +func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) { + errc := make(chan error, 1) + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + p := []byte("some data.\n") + for { + _, err := w.Write(p) + if err != nil { + errc <- err + return nil + } + } + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: false, + EndHeaders: true, + }) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + // Close the connection and wait for the handler to (hopefully) notice. + st.cc.Close() + select { + case <-errc: + case <-time.After(5 * time.Second): + t.Error("timeout") + } + }) +} + +func TestServer_Rejects_Too_Many_Streams(t *testing.T) { + const testPath = "/some/path" + + inHandler := make(chan uint32) + leaveHandler := make(chan bool) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + id := w.(*responseWriter).rws.stream.id + inHandler <- id + if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath { + t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath) + } + <-leaveHandler + }) + defer st.Close() + st.greet() + nextStreamID := uint32(1) + streamID := func() uint32 { + defer func() { nextStreamID += 2 }() + return nextStreamID + } + sendReq := func(id uint32, headers ...string) { + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(headers...), + EndStream: true, + EndHeaders: true, + }) + } + for i := 0; i < defaultMaxStreams; i++ { + sendReq(streamID()) + <-inHandler + } + defer func() { + for i := 0; i < defaultMaxStreams; i++ { + leaveHandler <- true + } + }() + + // And this one should cross the limit: + // (It's also sent as a CONTINUATION, to verify we still track the decoder context, + // even if we're rejecting it) + rejectID := streamID() + headerBlock := st.encodeHeader(":path", testPath) + frag1, frag2 := headerBlock[:3], headerBlock[3:] + st.writeHeaders(HeadersFrameParam{ + StreamID: rejectID, + BlockFragment: frag1, + EndStream: true, + EndHeaders: false, // CONTINUATION coming + }) + if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil { + t.Fatal(err) + } + st.wantRSTStream(rejectID, ErrCodeProtocol) + + // But let a handler finish: + leaveHandler <- true + st.wantHeaders() + + // And now another stream should be able to start: + goodID := streamID() + sendReq(goodID, ":path", testPath) + select { + case got := <-inHandler: + if got != goodID { + t.Errorf("Got stream %d; want %d", got, goodID) + } + case <-time.After(3 * time.Second): + t.Error("timeout waiting for handler") + } +} + +// So many response headers that the server needs to use CONTINUATION frames: +func 
TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + h := w.Header() + for i := 0; i < 5000; i++ { + h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i)) + } + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.HeadersEnded() { + t.Fatal("got unwanted END_HEADERS flag") + } + n := 0 + for { + n++ + cf := st.wantContinuation() + if cf.HeadersEnded() { + break + } + } + if n < 5 { + t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n) + } + }) +} + +// This previously crashed (reported by Mathieu Lonjaret as observed +// while using Camlistore) because we got a DATA frame from the client +// after the handler exited and our logic at the time was wrong, +// keeping a stream in the map in stateClosed, which tickled an +// invariant check later when we tried to remove that stream (via +// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop +// ended. +func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + // nothing + return nil + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: false, // DATA is coming + EndHeaders: true, + }) + hf := st.wantHeaders() + if !hf.HeadersEnded() || !hf.StreamEnded() { + t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf) + } + + // Sent when the a Handler closes while a client has + // indicated it's still sending DATA: + st.wantRSTStream(1, ErrCodeNo) + + // Now the handler has ended, so it's ended its + // stream, but the client hasn't closed its side + // (stateClosedLocal). So send more data and verify + // it doesn't crash with an internal invariant panic, like + // it did before. + st.writeData(1, true, []byte("foo")) + + // Get our flow control bytes back, since the handler didn't get them. + st.wantWindowUpdate(0, uint32(len("foo"))) + + // Sent after a peer sends data anyway (admittedly the + // previous RST_STREAM might've still been in-flight), + // but they'll get the more friendly 'cancel' code + // first. + st.wantRSTStream(1, ErrCodeStreamClosed) + + // Set up a bunch of machinery to record the panic we saw + // previously. + var ( + panMu sync.Mutex + panicVal interface{} + ) + + testHookOnPanicMu.Lock() + testHookOnPanic = func(sc *serverConn, pv interface{}) bool { + panMu.Lock() + panicVal = pv + panMu.Unlock() + return true + } + testHookOnPanicMu.Unlock() + + // Now force the serve loop to end, via closing the connection. + st.cc.Close() + select { + case <-st.sc.doneServing: + // Loop has exited. 
+ panMu.Lock() + got := panicVal + panMu.Unlock() + if got != nil { + t.Errorf("Got panic: %v", got) + } + case <-time.After(5 * time.Second): + t.Error("timeout") + } + }) +} + +func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) } +func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) } + +func testRejectTLS(t *testing.T, max uint16) { + st := newServerTester(t, nil, func(c *tls.Config) { + c.MaxVersion = max + }) + defer st.Close() + gf := st.wantGoAway() + if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { + t.Errorf("Got error code %v; want %v", got, want) + } +} + +func TestServer_Rejects_TLSBadCipher(t *testing.T) { + st := newServerTester(t, nil, func(c *tls.Config) { + // Only list bad ones: + c.CipherSuites = []uint16{ + tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + } + }) + defer st.Close() + gf := st.wantGoAway() + if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { + t.Errorf("Got error code %v; want %v", got, want) + } +} + +func TestServer_Advertises_Common_Cipher(t *testing.T) { + const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + st := newServerTester(t, nil, func(c *tls.Config) { + // Have the client only support the one required by the spec. + c.CipherSuites = []uint16{requiredSuite} + }, func(ts *httptest.Server) { + var srv *http.Server = ts.Config + // Have the server configured with no specific cipher suites. + // This tests that Go's defaults include the required one. + srv.TLSConfig = nil + }) + defer st.Close() + st.greet() +} + +func (st *serverTester) onHeaderField(f hpack.HeaderField) { + if f.Name == "date" { + return + } + st.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value}) +} + +func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) { + st.decodedHeaders = nil + if _, err := st.hpackDec.Write(headerBlock); err != nil { + st.t.Fatalf("hpack decoding error: %v", err) + } + if err := st.hpackDec.Close(); err != nil { + st.t.Fatalf("hpack decoding error: %v", err) + } + return st.decodedHeaders +} + +// testServerResponse sets up an idle HTTP/2 connection. The client function should +// write a single request that must be handled by the handler. This waits up to 5s +// for client to return, then up to an additional 2s for the handler to return. 
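The TLS tests above encode RFC 7540's minimum transport requirements: TLS 1.2 or newer, none of the blacklisted cipher suites, and support for TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256. A sketch of a server configuration that satisfies them; the address and certificate paths are placeholders, and ConfigureServer is the same entry point the curl test below uses.

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443", // placeholder address
		TLSConfig: &tls.Config{
			MinVersion: tls.VersionTLS12, // older versions get GOAWAY(INADEQUATE_SECURITY), as tested above
			CipherSuites: []uint16{
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,   // the suite RFC 7540 requires
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, // its ECDSA counterpart
			},
			PreferServerCipherSuites: true,
		},
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("hello over h2\n"))
		}),
	}
	// ConfigureServer validates the cipher list and adds "h2" to NextProtos;
	// TestConfigureServer further down exercises the validation cases.
	if err := http2.ConfigureServer(srv, nil); err != nil {
		log.Fatal(err)
	}
	// cert.pem and key.pem are placeholders for a real certificate and key.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}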
+func testServerResponse(t testing.TB, + handler func(http.ResponseWriter, *http.Request) error, + client func(*serverTester), +) { + errc := make(chan error, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if r.Body == nil { + t.Fatal("nil Body") + } + errc <- handler(w, r) + }) + defer st.Close() + + donec := make(chan bool) + go func() { + defer close(donec) + st.greet() + client(st) + }() + + select { + case <-donec: + case <-time.After(5 * time.Second): + t.Fatal("timeout in client") + } + + select { + case err := <-errc: + if err != nil { + t.Fatalf("Error in handler: %v", err) + } + case <-time.After(2 * time.Second): + t.Fatal("timeout in handler") + } +} + +// readBodyHandler returns an http Handler func that reads len(want) +// bytes from r.Body and fails t if the contents read were not +// the value of want. +func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + buf := make([]byte, len(want)) + _, err := io.ReadFull(r.Body, buf) + if err != nil { + t.Error(err) + return + } + if string(buf) != want { + t.Errorf("read %q; want %q", buf, want) + } + } +} + +// TestServerWithCurl currently fails, hence the LenientCipherSuites test. See: +// https://github.com/tatsuhiro-t/nghttp2/issues/140 & +// http://sourceforge.net/p/curl/bugs/1472/ +func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) } +func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) } + +func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) { + if runtime.GOOS != "linux" { + t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") + } + if testing.Short() { + t.Skip("skipping curl test in short mode") + } + requireCurl(t) + var gotConn int32 + testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) } + + const msg = "Hello from curl!\n" + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Foo", "Bar") + w.Header().Set("Client-Proto", r.Proto) + io.WriteString(w, msg) + })) + ConfigureServer(ts.Config, &Server{ + PermitProhibitedCipherSuites: permitProhibitedCipherSuites, + }) + ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config + ts.StartTLS() + defer ts.Close() + + t.Logf("Running test server for curl to hit at: %s", ts.URL) + container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL) + defer kill(container) + resc := make(chan interface{}, 1) + go func() { + res, err := dockerLogs(container) + if err != nil { + resc <- err + } else { + resc <- res + } + }() + select { + case res := <-resc: + if err, ok := res.(error); ok { + t.Fatal(err) + } + body := string(res.([]byte)) + // Search for both "key: value" and "key:value", since curl changed their format + // Our Dockerfile contains the latest version (no space), but just in case people + // didn't rebuild, check both. 
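This Docker-based curl smoke test can also be approximated in-process with the package's own Transport, reusing the same ConfigureServer plus httptest wiring while dropping the external dependency. A hedged sketch, separate from the patch; the handler, header and body values are made up to echo the curl test.

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/http/httptest"

	"golang.org/x/net/http2"
)

func main() {
	ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Foo", "Bar")
		io.WriteString(w, "Hello from Go!\n")
	}))
	if err := http2.ConfigureServer(ts.Config, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	ts.TLS = ts.Config.TLSConfig // same trick the curl test uses
	ts.StartTLS()
	defer ts.Close()

	// An HTTP/2-only client; InsecureSkipVerify because httptest serves a self-signed cert.
	c := &http.Client{Transport: &http2.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	res, err := c.Get(ts.URL)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	body, _ := ioutil.ReadAll(res.Body)
	fmt.Printf("proto=%s foo=%q body=%q\n", res.Proto, res.Header.Get("Foo"), body)
}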
+ if !strings.Contains(body, "foo: Bar") && !strings.Contains(body, "foo:Bar") { + t.Errorf("didn't see foo: Bar header") + t.Logf("Got: %s", body) + } + if !strings.Contains(body, "client-proto: HTTP/2") && !strings.Contains(body, "client-proto:HTTP/2") { + t.Errorf("didn't see client-proto: HTTP/2 header") + t.Logf("Got: %s", res) + } + if !strings.Contains(string(res.([]byte)), msg) { + t.Errorf("didn't see %q content", msg) + t.Logf("Got: %s", res) + } + case <-time.After(3 * time.Second): + t.Errorf("timeout waiting for curl") + } + + if atomic.LoadInt32(&gotConn) == 0 { + t.Error("never saw an http2 connection") + } +} + +var doh2load = flag.Bool("h2load", false, "Run h2load test") + +func TestServerWithH2Load(t *testing.T) { + if !*doh2load { + t.Skip("Skipping without --h2load flag.") + } + if runtime.GOOS != "linux" { + t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") + } + requireH2load(t) + + msg := strings.Repeat("Hello, h2load!\n", 5000) + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + io.WriteString(w, msg) + })) + ts.StartTLS() + defer ts.Close() + + cmd := exec.Command("docker", "run", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl", + "-n100000", "-c100", "-m100", ts.URL) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + t.Fatal(err) + } +} + +// Issue 12843 +func TestServerDoS_MaxHeaderListSize(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + defer st.Close() + + // shake hands + frameSize := defaultMaxReadFrameSize + var advHeaderListSize *uint32 + st.greetAndCheckSettings(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + if s.Val < minMaxFrameSize { + frameSize = minMaxFrameSize + } else if s.Val > maxFrameSize { + frameSize = maxFrameSize + } else { + frameSize = int(s.Val) + } + case SettingMaxHeaderListSize: + advHeaderListSize = &s.Val + } + return nil + }) + + if advHeaderListSize == nil { + t.Errorf("server didn't advertise a max header list size") + } else if *advHeaderListSize == 0 { + t.Errorf("server advertised a max header list size of 0") + } + + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + cookie := strings.Repeat("*", 4058) + st.encodeHeaderField("cookie", cookie) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.headerBuf.Bytes(), + EndStream: true, + EndHeaders: false, + }) + + // Capture the short encoding of a duplicate ~4K cookie, now + // that we've already sent it once. + st.headerBuf.Reset() + st.encodeHeaderField("cookie", cookie) + + // Now send 1MB of it. 
+ const size = 1 << 20 + b := bytes.Repeat(st.headerBuf.Bytes(), size/st.headerBuf.Len()) + for len(b) > 0 { + chunk := b + if len(chunk) > frameSize { + chunk = chunk[:frameSize] + } + b = b[len(chunk):] + st.fr.WriteContinuation(1, len(b) == 0, chunk) + } + + h := st.wantHeaders() + if !h.HeadersEnded() { + t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) + } + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "431"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", "63"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +func TestCompressionErrorOnWrite(t *testing.T) { + const maxStrLen = 8 << 10 + var serverConfig *http.Server + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }, func(ts *httptest.Server) { + serverConfig = ts.Config + serverConfig.MaxHeaderBytes = maxStrLen + }) + st.addLogFilter("connection error: COMPRESSION_ERROR") + defer st.Close() + st.greet() + + maxAllowed := st.sc.framer.maxHeaderStringLen() + + // Crank this up, now that we have a conn connected with the + // hpack.Decoder's max string length set has been initialized + // from the earlier low ~8K value. We want this higher so don't + // hit the max header list size. We only want to test hitting + // the max string size. + serverConfig.MaxHeaderBytes = 1 << 20 + + // First a request with a header that's exactly the max allowed size + // for the hpack compression. It's still too long for the header list + // size, so we'll get the 431 error, but that keeps the compression + // context still valid. + hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed)) + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + if !h.HeadersEnded() { + t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) + } + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "431"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", "63"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } + df := st.wantData() + if !strings.Contains(string(df.Data()), "HTTP Error 431") { + t.Errorf("Unexpected data body: %q", df.Data()) + } + if !df.StreamEnded() { + t.Fatalf("expect data stream end") + } + + // And now send one that's just one byte too big. + hbf = st.encodeHeader("bar", strings.Repeat("b", maxAllowed+1)) + st.writeHeaders(HeadersFrameParam{ + StreamID: 3, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeCompression { + t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) + } +} + +func TestCompressionErrorOnClose(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }) + st.addLogFilter("connection error: COMPRESSION_ERROR") + defer st.Close() + st.greet() + + hbf := st.encodeHeader("foo", "bar") + hbf = hbf[:len(hbf)-1] // truncate one byte from the end, so hpack.Decoder.Close fails. 
+ st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeCompression { + t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) + } +} + +// test that a server handler can read trailers from a client +func TestServerReadsTrailers(t *testing.T) { + const testBody = "some test body" + writeReq := func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader("trailer", "Foo, Bar", "trailer", "Baz"), + EndStream: false, + EndHeaders: true, + }) + st.writeData(1, false, []byte(testBody)) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeaderRaw( + "foo", "foov", + "bar", "barv", + "baz", "bazv", + "surprise", "wasn't declared; shouldn't show up", + ), + EndStream: true, + EndHeaders: true, + }) + } + checkReq := func(r *http.Request) { + wantTrailer := http.Header{ + "Foo": nil, + "Bar": nil, + "Baz": nil, + } + if !reflect.DeepEqual(r.Trailer, wantTrailer) { + t.Errorf("initial Trailer = %v; want %v", r.Trailer, wantTrailer) + } + slurp, err := ioutil.ReadAll(r.Body) + if string(slurp) != testBody { + t.Errorf("read body %q; want %q", slurp, testBody) + } + if err != nil { + t.Fatalf("Body slurp: %v", err) + } + wantTrailerAfter := http.Header{ + "Foo": {"foov"}, + "Bar": {"barv"}, + "Baz": {"bazv"}, + } + if !reflect.DeepEqual(r.Trailer, wantTrailerAfter) { + t.Errorf("final Trailer = %v; want %v", r.Trailer, wantTrailerAfter) + } + } + testServerRequest(t, writeReq, checkReq) +} + +// test that a server handler can send trailers +func TestServerWritesTrailers_WithFlush(t *testing.T) { testServerWritesTrailers(t, true) } +func TestServerWritesTrailers_WithoutFlush(t *testing.T) { testServerWritesTrailers(t, false) } + +func testServerWritesTrailers(t *testing.T, withFlush bool) { + // See https://httpwg.github.io/specs/rfc7540.html#rfc.section.8.1.3 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B") + w.Header().Add("Trailer", "Server-Trailer-C") + w.Header().Add("Trailer", "Transfer-Encoding, Content-Length, Trailer") // filtered + + // Regular headers: + w.Header().Set("Foo", "Bar") + w.Header().Set("Content-Length", "5") // len("Hello") + + io.WriteString(w, "Hello") + if withFlush { + w.(http.Flusher).Flush() + } + w.Header().Set("Server-Trailer-A", "valuea") + w.Header().Set("Server-Trailer-C", "valuec") // skipping B + // After a flush, random keys like Server-Surprise shouldn't show up: + w.Header().Set("Server-Surpise", "surprise! 
this isn't predeclared!") + // But we do permit promoting keys to trailers after a + // flush if they start with the magic + // otherwise-invalid "Trailer:" prefix: + w.Header().Set("Trailer:Post-Header-Trailer", "hi1") + w.Header().Set("Trailer:post-header-trailer2", "hi2") + w.Header().Set("Trailer:Range", "invalid") + w.Header().Set("Trailer:Foo\x01Bogus", "invalid") + w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 7230 4.1.2") + w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 7230 4.1.2") + w.Header().Set("Trailer", "should not be included; Forbidden by RFC 7230 4.1.2") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("response HEADERS had END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("response HEADERS didn't have END_HEADERS") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo", "Bar"}, + {"trailer", "Server-Trailer-A, Server-Trailer-B"}, + {"trailer", "Server-Trailer-C"}, + {"trailer", "Transfer-Encoding, Content-Length, Trailer"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", "5"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + df := st.wantData() + if string(df.Data()) != "Hello" { + t.Fatalf("Client read %q; want Hello", df.Data()) + } + if df.StreamEnded() { + t.Fatalf("data frame had STREAM_ENDED") + } + tf := st.wantHeaders() // for the trailers + if !tf.StreamEnded() { + t.Fatalf("trailers HEADERS lacked END_STREAM") + } + if !tf.HeadersEnded() { + t.Fatalf("trailers HEADERS lacked END_HEADERS") + } + wanth = [][2]string{ + {"post-header-trailer", "hi1"}, + {"post-header-trailer2", "hi2"}, + {"server-trailer-a", "valuea"}, + {"server-trailer-c", "valuec"}, + } + goth = st.decodeHeader(tf.HeaderBlockFragment()) + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + }) +} + +// validate transmitted header field names & values +// golang.org/issue/14048 +func TestServerDoesntWriteInvalidHeaders(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Add("OK1", "x") + w.Header().Add("Bad:Colon", "x") // colon (non-token byte) in key + w.Header().Add("Bad1\x00", "x") // null in key + w.Header().Add("Bad2", "x\x00y") // null in value + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Error("response HEADERS lacked END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("response HEADERS didn't have END_HEADERS") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"ok1", "x"}, + {"content-length", "0"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + }) +} + +func BenchmarkServerGets(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + + const msg = "Hello, world" + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + + // Give the server quota to reply. 
(plus it has the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + id := 1 + uint32(i)*2 + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } + } +} + +func BenchmarkServerPosts(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + + const msg = "Hello, world" + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + // Consume the (empty) body from th peer before replying, otherwise + // the server will sometimes (depending on scheduling) send the peer a + // a RST_STREAM with the CANCEL error code. + if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil { + b.Errorf("Copy error; got %v, %v; want 0, nil", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + + // Give the server quota to reply. (plus it has the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + id := 1 + uint32(i)*2 + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + st.writeData(id, true, nil) + st.wantHeaders() + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } + } +} + +// Send a stream of messages from server to client in separate data frames. +// Brings up performance issues seen in long streams. +// Created to show problem in go issue #18502 +func BenchmarkServerToClientStreamDefaultOptions(b *testing.B) { + benchmarkServerToClientStream(b) +} + +// Justification for Change-Id: Iad93420ef6c3918f54249d867098f1dadfa324d8 +// Expect to see memory/alloc reduction by opting in to Frame reuse with the Framer. +func BenchmarkServerToClientStreamReuseFrames(b *testing.B) { + benchmarkServerToClientStream(b, optFramerReuseFrames) +} + +func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msgLen = 1 + // default window size + const windowSize = 1<<16 - 1 + + // next message to send from the server and for the client to expect + nextMsg := func(i int) []byte { + msg := make([]byte, msgLen) + msg[0] = byte(i) + if len(msg) != msgLen { + panic("invalid test setup msg length") + } + return msg + } + + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + // Consume the (empty) body from th peer before replying, otherwise + // the server will sometimes (depending on scheduling) send the peer a + // a RST_STREAM with the CANCEL error code. + if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil { + b.Errorf("Copy error; got %v, %v; want 0, nil", n, err) + } + for i := 0; i < b.N; i += 1 { + w.Write(nextMsg(i)) + w.(http.Flusher).Flush() + } + }, newServerOpts...) 
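Stepping back to the trailer tests above (TestServerReadsTrailers and TestServerWritesTrailers): from a handler, response trailers are produced either by declaring their names in a Trailer header before the first write, or, once the response has started, by using the "Trailer:" key prefix that TestServerWritesTrailers exercises. A sketch of both paths, separate from the patch, with made-up trailer names and a placeholder listen address:

package main

import (
	"io"
	"log"
	"net/http"
)

func withTrailers(w http.ResponseWriter, r *http.Request) {
	// Declared before the first Write, so the name is announced in the
	// response HEADERS frame's Trailer header.
	w.Header().Set("Trailer", "Checksum")

	io.WriteString(w, "Hello")
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}

	// Value for the declared trailer, filled in after the body was written.
	w.Header().Set("Checksum", "placeholder-value")

	// An undeclared trailer promoted after the fact via the magic prefix;
	// http.TrailerPrefix is the constant form of the "Trailer:" strings used above.
	w.Header().Set(http.TrailerPrefix+"Post-Header-Trailer", "hi1")
}

func main() {
	log.Fatal(http.ListenAndServe("localhost:8080", http.HandlerFunc(withTrailers)))
}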
+ defer st.Close() + st.greet() + + const id = uint32(1) + + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + + st.writeData(id, true, nil) + st.wantHeaders() + + var pendingWindowUpdate = uint32(0) + + for i := 0; i < b.N; i += 1 { + expected := nextMsg(i) + df := st.wantData() + if bytes.Compare(expected, df.data) != 0 { + b.Fatalf("Bad message received; want %v; got %v", expected, df.data) + } + // try to send infrequent but large window updates so they don't overwhelm the test + pendingWindowUpdate += uint32(len(df.data)) + if pendingWindowUpdate >= windowSize/2 { + if err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil { + b.Fatal(err) + } + if err := st.fr.WriteWindowUpdate(id, pendingWindowUpdate); err != nil { + b.Fatal(err) + } + pendingWindowUpdate = 0 + } + } + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } +} + +// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53 +// Verify we don't hang. +func TestIssue53(t *testing.T) { + const data = "PRI * HTTP/2.0\r\n\r\nSM" + + "\r\n\r\n\x00\x00\x00\x01\ainfinfin\ad" + s := &http.Server{ + ErrorLog: log.New(io.MultiWriter(stderrv(), twriter{t: t}), "", log.LstdFlags), + Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("hello")) + }), + } + s2 := &Server{ + MaxReadFrameSize: 1 << 16, + PermitProhibitedCipherSuites: true, + } + c := &issue53Conn{[]byte(data), false, false} + s2.ServeConn(c, &ServeConnOpts{BaseConfig: s}) + if !c.closed { + t.Fatal("connection is not closed") + } +} + +type issue53Conn struct { + data []byte + closed bool + written bool +} + +func (c *issue53Conn) Read(b []byte) (n int, err error) { + if len(c.data) == 0 { + return 0, io.EOF + } + n = copy(b, c.data) + c.data = c.data[n:] + return +} + +func (c *issue53Conn) Write(b []byte) (n int, err error) { + c.written = true + return len(b), nil +} + +func (c *issue53Conn) Close() error { + c.closed = true + return nil +} + +func (c *issue53Conn) LocalAddr() net.Addr { + return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706} +} +func (c *issue53Conn) RemoteAddr() net.Addr { + return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706} +} +func (c *issue53Conn) SetDeadline(t time.Time) error { return nil } +func (c *issue53Conn) SetReadDeadline(t time.Time) error { return nil } +func (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil } + +// golang.org/issue/12895 +func TestConfigureServer(t *testing.T) { + tests := []struct { + name string + tlsConfig *tls.Config + wantErr string + }{ + { + name: "empty server", + }, + { + name: "just the required cipher suite", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + }, + }, + { + name: "just the alternative required cipher suite", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + }, + }, + { + name: "missing required cipher suite", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384}, + }, + wantErr: "is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.", + }, + { + name: "required after bad", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + }, + wantErr: "contains an HTTP/2-approved cipher suite (0xc02f), but it comes after", + }, + { + name: 
"bad after required", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA}, + }, + }, + } + for _, tt := range tests { + srv := &http.Server{TLSConfig: tt.tlsConfig} + err := ConfigureServer(srv, nil) + if (err != nil) != (tt.wantErr != "") { + if tt.wantErr != "" { + t.Errorf("%s: success, but want error", tt.name) + } else { + t.Errorf("%s: unexpected error: %v", tt.name, err) + } + } + if err != nil && tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("%s: err = %v; want substring %q", tt.name, err, tt.wantErr) + } + if err == nil && !srv.TLSConfig.PreferServerCipherSuites { + t.Errorf("%s: PreferServerCipherSuite is false; want true", tt.name) + } + } +} + +func TestServerRejectHeadWithBody(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "HEAD"), + EndStream: false, // what we're testing, a bogus HEAD request with body + EndHeaders: true, + }) + st.wantRSTStream(1, ErrCodeProtocol) +} + +func TestServerNoAutoContentLengthOnHead(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. (or smaller than one frame) + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "HEAD"), + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "200"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +// golang.org/issue/13495 +func TestServerNoDuplicateContentType(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.Header()["Content-Type"] = []string{""} + fmt.Fprintf(w, "hi") + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "200"}, + {"content-type", ""}, + {"content-length", "41"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +func disableGoroutineTracking() (restore func()) { + old := DebugGoroutines + DebugGoroutines = false + return func() { DebugGoroutines = old } +} + +func BenchmarkServer_GetRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + + st.greet() + // Give the server quota to reply. 
(plus it has the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "GET") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + st.wantData() + } +} + +func BenchmarkServer_PostRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + // Give the server quota to reply. (plus it has the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "POST") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: false, + EndHeaders: true, + }) + st.writeData(streamID, true, nil) + st.wantHeaders() + st.wantData() + } +} + +type connStateConn struct { + net.Conn + cs tls.ConnectionState +} + +func (c connStateConn) ConnectionState() tls.ConnectionState { return c.cs } + +// golang.org/issue/12737 -- handle any net.Conn, not just +// *tls.Conn. +func TestServerHandleCustomConn(t *testing.T) { + var s Server + c1, c2 := net.Pipe() + clientDone := make(chan struct{}) + handlerDone := make(chan struct{}) + var req *http.Request + go func() { + defer close(clientDone) + defer c2.Close() + fr := NewFramer(c2, c2) + io.WriteString(c2, ClientPreface) + fr.WriteSettings() + fr.WriteSettingsAck() + f, err := fr.ReadFrame() + if err != nil { + t.Error(err) + return + } + if sf, ok := f.(*SettingsFrame); !ok || sf.IsAck() { + t.Errorf("Got %v; want non-ACK SettingsFrame", summarizeFrame(f)) + return + } + f, err = fr.ReadFrame() + if err != nil { + t.Error(err) + return + } + if sf, ok := f.(*SettingsFrame); !ok || !sf.IsAck() { + t.Errorf("Got %v; want ACK SettingsFrame", summarizeFrame(f)) + return + } + var henc hpackEncoder + fr.WriteHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: henc.encodeHeaderRaw(t, ":method", "GET", ":path", "/", ":scheme", "https", ":authority", "foo.com"), + EndStream: true, + EndHeaders: true, + }) + go io.Copy(ioutil.Discard, c2) + <-handlerDone + }() + const testString = "my custom ConnectionState" + fakeConnState := tls.ConnectionState{ + ServerName: testString, + Version: tls.VersionTLS12, + CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + } + go s.ServeConn(connStateConn{c1, fakeConnState}, &ServeConnOpts{ + BaseConfig: &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer close(handlerDone) + req = r + }), + }}) + select { + case <-clientDone: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for handler") + } + if req.TLS == nil { + t.Fatalf("Request.TLS is nil. 
Got: %#v", req) + } + if req.TLS.ServerName != testString { + t.Fatalf("Request.TLS = %+v; want ServerName of %q", req.TLS, testString) + } +} + +// golang.org/issue/14214 +func TestServer_Rejects_ConnHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("should not get to Handler") + }) + defer st.Close() + st.greet() + st.bodylessReq1("connection", "foo") + hf := st.wantHeaders() + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "400"}, + {"content-type", "text/plain; charset=utf-8"}, + {"x-content-type-options", "nosniff"}, + {"content-length", "51"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } +} + +type hpackEncoder struct { + enc *hpack.Encoder + buf bytes.Buffer +} + +func (he *hpackEncoder) encodeHeaderRaw(t *testing.T, headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + he.buf.Reset() + if he.enc == nil { + he.enc = hpack.NewEncoder(&he.buf) + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + err := he.enc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } + headers = headers[2:] + } + return he.buf.Bytes() +} + +func TestCheckValidHTTP2Request(t *testing.T) { + tests := []struct { + h http.Header + want error + }{ + { + h: http.Header{"Te": {"trailers"}}, + want: nil, + }, + { + h: http.Header{"Te": {"trailers", "bogus"}}, + want: errors.New(`request header "TE" may only be "trailers" in HTTP/2`), + }, + { + h: http.Header{"Foo": {""}}, + want: nil, + }, + { + h: http.Header{"Connection": {""}}, + want: errors.New(`request header "Connection" is not valid in HTTP/2`), + }, + { + h: http.Header{"Proxy-Connection": {""}}, + want: errors.New(`request header "Proxy-Connection" is not valid in HTTP/2`), + }, + { + h: http.Header{"Keep-Alive": {""}}, + want: errors.New(`request header "Keep-Alive" is not valid in HTTP/2`), + }, + { + h: http.Header{"Upgrade": {""}}, + want: errors.New(`request header "Upgrade" is not valid in HTTP/2`), + }, + } + for i, tt := range tests { + got := checkValidHTTP2RequestHeaders(tt.h) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. checkValidHTTP2Request = %v; want %v", i, got, tt.want) + } + } +} + +// golang.org/issue/14030 +func TestExpect100ContinueAfterHandlerWrites(t *testing.T) { + const msg = "Hello" + const msg2 = "World" + + doRead := make(chan bool, 1) + defer close(doRead) // fallback cleanup + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + + // Do a read, which might force a 100-continue status to be sent. 
+ <-doRead + r.Body.Read(make([]byte, 10)) + + io.WriteString(w, msg2) + + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20)) + req.Header.Set("Expect", "100-continue") + + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + buf := make([]byte, len(msg)) + if _, err := io.ReadFull(res.Body, buf); err != nil { + t.Fatal(err) + } + if string(buf) != msg { + t.Fatalf("msg = %q; want %q", buf, msg) + } + + doRead <- true + + if _, err := io.ReadFull(res.Body, buf); err != nil { + t.Fatal(err) + } + if string(buf) != msg2 { + t.Fatalf("second msg = %q; want %q", buf, msg2) + } +} + +type funcReader func([]byte) (n int, err error) + +func (f funcReader) Read(p []byte) (n int, err error) { return f(p) } + +// golang.org/issue/16481 -- return flow control when streams close with unread data. +// (The Server version of the bug. See also TestUnreadFlowControlReturned_Transport) +func TestUnreadFlowControlReturned_Server(t *testing.T) { + unblock := make(chan bool, 1) + defer close(unblock) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // Don't read the 16KB request body. Wait until the client's + // done sending it and then return. This should cause the Server + // to then return those 16KB of flow control to the client. + <-unblock + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + // This previously hung on the 4th iteration. + for i := 0; i < 6; i++ { + body := io.MultiReader( + io.LimitReader(neverEnding('A'), 16<<10), + funcReader(func([]byte) (n int, err error) { + unblock <- true + return 0, io.EOF + }), + ) + req, _ := http.NewRequest("POST", st.ts.URL, body) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + } + +} + +func TestServerIdleTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + }, func(h2s *Server) { + h2s.IdleTimeout = 500 * time.Millisecond + }) + defer st.Close() + + st.greet() + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeNo { + t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) + } +} + +func TestServerIdleTimeout_AfterRequest(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + const timeout = 250 * time.Millisecond + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + time.Sleep(timeout * 2) + }, func(h2s *Server) { + h2s.IdleTimeout = timeout + }) + defer st.Close() + + st.greet() + + // Send a request which takes twice the timeout. Verifies the + // idle timeout doesn't fire while we're in a request: + st.bodylessReq1() + st.wantHeaders() + + // But the idle timeout should be rearmed after the request + // is done: + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeNo { + t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) + } +} + +// grpc-go closes the Request.Body currently with a Read. +// Verify that it doesn't race. 
+// See https://github.com/grpc/grpc-go/pull/938 +func TestRequestBodyReadCloseRace(t *testing.T) { + for i := 0; i < 100; i++ { + body := &requestBody{ + pipe: &pipe{ + b: new(bytes.Buffer), + }, + } + body.pipe.CloseWithError(io.EOF) + + done := make(chan bool, 1) + buf := make([]byte, 10) + go func() { + time.Sleep(1 * time.Millisecond) + body.Close() + done <- true + }() + body.Read(buf) + <-done + } +} + +func TestIssue20704Race(t *testing.T) { + if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { + t.Skip("skipping in short mode") + } + const ( + itemSize = 1 << 10 + itemCount = 100 + ) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + for i := 0; i < itemCount; i++ { + _, err := w.Write(make([]byte, itemSize)) + if err != nil { + return + } + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + cl := &http.Client{Transport: tr} + + for i := 0; i < 1000; i++ { + resp, err := cl.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + // Force a RST stream to the server by closing without + // reading the body: + resp.Body.Close() + } +} diff --git a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml new file mode 100644 index 000000000..31a84bed4 --- /dev/null +++ b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml @@ -0,0 +1,5021 @@ + + + + + + + + + + + + + + + + + + + Hypertext Transfer Protocol version 2 + + + Twist +
    + mbelshe@chromium.org +
    +
    + + + Google, Inc +
    + fenix@google.com +
    +
    + + + Mozilla +
    + + 331 E Evelyn Street + Mountain View + CA + 94041 + US + + martin.thomson@gmail.com +
    +
    + + + Applications + HTTPbis + HTTP + SPDY + Web + + + + This specification describes an optimized expression of the semantics of the Hypertext + Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a + reduced perception of latency by introducing header field compression and allowing multiple + concurrent messages on the same connection. It also introduces unsolicited push of + representations from servers to clients. + + + This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax. + HTTP's existing semantics remain unchanged. + + + + + + Discussion of this draft takes place on the HTTPBIS working group mailing list + (ietf-http-wg@w3.org), which is archived at . + + + Working Group information can be found at ; that specific to HTTP/2 are at . + + + The changes in this draft are summarized in . + + + +
    + + +
    + + + The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the + HTTP/1.1 message format () has + several characteristics that have a negative overall effect on application performance + today. + + + In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given + TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed + request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1 + clients that need to make many requests typically use multiple connections to a server in + order to achieve concurrency and thereby reduce latency. + + + Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary + network traffic, as well as causing the initial TCP congestion + window to quickly fill. This can result in excessive latency when multiple requests are + made on a new TCP connection. + + + HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an + underlying connection. Specifically, it allows interleaving of request and response + messages on the same connection and uses an efficient coding for HTTP header fields. It + also allows prioritization of requests, letting more important requests complete more + quickly, further improving performance. + + + The resulting protocol is more friendly to the network, because fewer TCP connections can + be used in comparison to HTTP/1.x. This means less competition with other flows, and + longer-lived connections, which in turn leads to better utilization of available network + capacity. + + + Finally, HTTP/2 also enables more efficient processing of messages through use of binary + message framing. + +
    + +
    + + HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core + features of HTTP/1.1, but aims to be more efficient in several ways. + + + The basic protocol unit in HTTP/2 is a frame. Each frame + type serves a different purpose. For example, HEADERS and + DATA frames form the basis of HTTP requests and + responses; other frame types like SETTINGS, + WINDOW_UPDATE, and PUSH_PROMISE are used in support of other + HTTP/2 features. + + + Multiplexing of requests is achieved by having each HTTP request-response exchange + associated with its own stream. Streams are largely + independent of each other, so a blocked or stalled request or response does not prevent + progress on other streams. + + + Flow control and prioritization ensure that it is possible to efficiently use multiplexed + streams. Flow control helps to ensure that only data that + can be used by a receiver is transmitted. Prioritization ensures that limited resources can be directed + to the most important streams first. + + + HTTP/2 adds a new interaction mode, whereby a server can push + responses to a client. Server push allows a server to speculatively send a client + data that the server anticipates the client will need, trading off some network usage + against a potential latency gain. The server does this by synthesizing a request, which it + sends as a PUSH_PROMISE frame. The server is then able to send a response to + the synthetic request on a separate stream. + + + Frames that contain HTTP header fields are compressed. + HTTP requests can be highly redundant, so compression can reduce the size of requests and + responses significantly. + + +
    + + The HTTP/2 specification is split into four parts: + + + Starting HTTP/2 covers how an HTTP/2 connection is + initiated. + + + The framing and streams layers describe the way HTTP/2 frames are + structured and formed into multiplexed streams. + + + Frame and error + definitions include details of the frame and error types used in HTTP/2. + + + HTTP mappings and additional + requirements describe how HTTP semantics are expressed using frames and + streams. + + + + + While some of the frame and stream layer concepts are isolated from HTTP, this + specification does not define a completely generic framing layer. The framing and streams + layers are tailored to the needs of the HTTP protocol and server push. + +
    + +
    + + The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD + NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as + described in RFC 2119. + + + All numeric values are in network byte order. Values are unsigned unless otherwise + indicated. Literal values are provided in decimal or hexadecimal as appropriate. + Hexadecimal literals are prefixed with 0x to distinguish them + from decimal literals. + + + The following terms are used: + + + The endpoint initiating the HTTP/2 connection. + + + A transport-layer connection between two endpoints. + + + An error that affects the entire HTTP/2 connection. + + + Either the client or server of the connection. + + + The smallest unit of communication within an HTTP/2 connection, consisting of a header + and a variable-length sequence of octets structured according to the frame type. + + + An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint + that is remote to the primary subject of discussion. + + + An endpoint that is receiving frames. + + + An endpoint that is transmitting frames. + + + The endpoint which did not initiate the HTTP/2 connection. + + + A bi-directional flow of frames across a virtual channel within the HTTP/2 connection. + + + An error on the individual HTTP/2 stream. + + + + + Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined + in . + +
    +
    + +
    + + An HTTP/2 connection is an application layer protocol running on top of a TCP connection + (). The client is the TCP connection initiator. + + + HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same + default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result, + implementations processing requests for target resource URIs like http://example.org/foo or https://example.com/bar are required to first discover whether the + upstream server (the immediate peer to which the client wishes to establish a connection) + supports HTTP/2. + + + + The means by which support for HTTP/2 is determined is different for "http" and "https" + URIs. Discovery for "http" URIs is described in . Discovery + for "https" URIs is described in . + + +
    + + The protocol defined in this document has two identifiers. + + + + The string "h2" identifies the protocol where HTTP/2 uses TLS. This identifier is used in the TLS application layer protocol negotiation extension (ALPN) + field and any place that HTTP/2 over TLS is identified. + + + The "h2" string is serialized into an ALPN protocol identifier as the two octet + sequence: 0x68, 0x32. + + + + + The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP. + This identifier is used in the HTTP/1.1 Upgrade header field and any place that + HTTP/2 over TCP is identified. + + + + + + Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message + semantics described in this document. + + + RFC Editor's Note: please remove the remainder of this section prior to the + publication of a final version of this document. + + + Only implementations of the final, published RFC can identify themselves as "h2" or "h2c". + Until such an RFC exists, implementations MUST NOT identify themselves using these + strings. + + + Examples and text throughout the rest of this document use "h2" as a matter of + editorial convenience only. Implementations of draft versions MUST NOT identify using + this string. + + + Implementations of draft versions of the protocol MUST add the string "-" and the + corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11 + over TLS is identified using the string "h2-11". + + + Non-compatible experiments that are based on these draft versions MUST append the string + "-" and an experiment name to the identifier. For example, an experimental implementation + of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself + as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in + . Experimenters are + encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list. + +
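As a rough Go sketch of offering these identifiers during TLS protocol negotiation (the standard library's crypto/tls is assumed, and the host name below is only a placeholder):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	// Offer "h2" first and fall back to HTTP/1.1 if the server does not speak HTTP/2.
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{
		NextProtos: []string{"h2", "http/1.1"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The negotiated value is "h2" only if the server selected HTTP/2 over TLS.
	fmt.Println("ALPN protocol:", conn.ConnectionState().NegotiatedProtocol)
}
```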
    + +
    + + A client that makes a request for an "http" URI without prior knowledge about support for + HTTP/2 uses the HTTP Upgrade mechanism (). The client makes an HTTP/1.1 request that includes an Upgrade + header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include + exactly one HTTP2-Settings header field. + +
+        For example:
+
+     GET / HTTP/1.1
+     Host: server.example.com
+     Connection: Upgrade, HTTP2-Settings
+     Upgrade: h2c
+     HTTP2-Settings: <base64url encoding of HTTP2-Settings payload>
+
    + + Requests that contain an entity body MUST be sent in their entirety before the client can + send HTTP/2 frames. This means that a large request entity can block the use of the + connection until it is completely sent. + + + If concurrency of an initial request with subsequent requests is important, an OPTIONS + request can be used to perform the upgrade to HTTP/2, at the cost of an additional + round-trip. + + + A server that does not support HTTP/2 can respond to the request as though the Upgrade + header field were absent: + +
    + +HTTP/1.1 200 OK +Content-Length: 243 +Content-Type: text/html + +... + +
    + + A server MUST ignore a "h2" token in an Upgrade header field. Presence of a token with + "h2" implies HTTP/2 over TLS, which is instead negotiated as described in . + + + A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols) + response. After the empty line that terminates the 101 response, the server can begin + sending HTTP/2 frames. These frames MUST include a response to the request that initiated + the Upgrade. + + +
    + + For example: + + +HTTP/1.1 101 Switching Protocols +Connection: Upgrade +Upgrade: h2c + +[ HTTP/2 connection ... + +
    + + The first HTTP/2 frame sent by the server is a SETTINGS frame () as the server connection preface (). Upon receiving the 101 response, the client sends a connection preface, which includes a + SETTINGS frame. + + + The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is + assigned default priority values. Stream 1 is + implicitly half closed from the client toward the server, since the request is completed + as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the + response. + + +
    + + A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one HTTP2-Settings header field. The HTTP2-Settings header field is a connection-specific header field + that includes parameters that govern the HTTP/2 connection, provided in anticipation of + the server accepting the request to upgrade. + +
    + +
    + + A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present, + or if more than one is present. A server MUST NOT send this header field. + + + + The content of the HTTP2-Settings header field is the + payload of a SETTINGS frame (), encoded as a + base64url string (that is, the URL- and filename-safe Base64 encoding described in , with any trailing '=' characters omitted). The + ABNF production for token68 is + defined in . + + + Since the upgrade is only intended to apply to the immediate connection, a client + sending HTTP2-Settings MUST also send HTTP2-Settings as a connection option in the Connection header field to prevent it from being forwarded + downstream. + + + A server decodes and interprets these values as it would any other + SETTINGS frame. Acknowledgement of the + SETTINGS parameters is not necessary, since a 101 response serves as implicit + acknowledgment. Providing these values in the Upgrade request gives a client an + opportunity to provide parameters prior to receiving any frames from the server. + +
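A hedged sketch of producing such a token in Go: each setting is packed as a 16-bit identifier followed by a 32-bit value, and the concatenation is base64url-encoded with the trailing padding omitted. The specific identifiers and values below are only illustrative.

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

func main() {
	// Illustrative settings: 0x3 = SETTINGS_MAX_CONCURRENT_STREAMS, 0x4 = SETTINGS_INITIAL_WINDOW_SIZE.
	settings := []struct {
		id  uint16
		val uint32
	}{
		{0x3, 100},
		{0x4, 1 << 20},
	}

	payload := make([]byte, 0, 6*len(settings))
	for _, s := range settings {
		var entry [6]byte
		binary.BigEndian.PutUint16(entry[0:2], s.id)
		binary.BigEndian.PutUint32(entry[2:6], s.val)
		payload = append(payload, entry[:]...)
	}

	// base64url with the trailing '=' characters omitted, as the text requires.
	fmt.Println("HTTP2-Settings:", base64.RawURLEncoding.EncodeToString(payload))
}
```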
    +
    + +
    + + A client that makes a request to an "https" URI uses TLS + with the application layer protocol negotiation extension. + + + HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a + client or selected by a server. + + + Once TLS negotiation is complete, both the client and the server send a connection preface. + +
    + +
    + + A client can learn that a particular server supports HTTP/2 by other means. For example, + describes a mechanism for advertising this capability. + + + A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2, + after the connection preface; a server can + identify such a connection by the presence of the connection preface. This only affects + the establishment of HTTP/2 connections over cleartext TCP; implementations that support + HTTP/2 over TLS MUST use protocol negotiation in TLS. + + + Without additional information, prior support for HTTP/2 is not a strong signal that a + given server will support HTTP/2 for future connections. For example, it is possible for + server configurations to change, for configurations to differ between instances in + clustered servers, or for network conditions to change. + +
    + +
    + + Upon establishment of a TCP connection and determination that HTTP/2 will be used by both + peers, each endpoint MUST send a connection preface as a final confirmation and to + establish the initial SETTINGS parameters for the HTTP/2 connection. The client and + server each send a different connection preface. + + + The client connection preface starts with a sequence of 24 octets, which in hex notation + are: + +
+     0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
    + + (the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence + is followed by a SETTINGS frame (). The + SETTINGS frame MAY be empty. The client sends the client connection + preface immediately upon receipt of a 101 Switching Protocols response (indicating a + successful upgrade), or as the first application data octets of a TLS connection. If + starting an HTTP/2 connection with prior knowledge of server support for the protocol, the + client connection preface is sent upon connection establishment. + + + + + The client connection preface is selected so that a large proportion of HTTP/1.1 or + HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note + that this does not address the concerns raised in . + + + + + The server connection preface consists of a potentially empty SETTINGS + frame () that MUST be the first frame the server sends in the + HTTP/2 connection. + + + The SETTINGS frames received from a peer as part of the connection preface + MUST be acknowledged (see ) after sending the connection + preface. + + + To avoid unnecessary latency, clients are permitted to send additional frames to the + server immediately after sending the client connection preface, without waiting to receive + the server connection preface. It is important to note, however, that the server + connection preface SETTINGS frame might include parameters that necessarily + alter how a client is expected to communicate with the server. Upon receiving the + SETTINGS frame, the client is expected to honor any parameters established. + In some configurations, it is possible for the server to transmit SETTINGS + before the client sends additional frames, providing an opportunity to avoid this issue. + + + Clients and servers MUST treat an invalid connection preface as a connection error of type + PROTOCOL_ERROR. A GOAWAY frame () + MAY be omitted in this case, since an invalid preface indicates that the peer is not using + HTTP/2. + +
    +
    + +
    + + Once the HTTP/2 connection is established, endpoints can begin exchanging frames. + + +
    + + All frames begin with a fixed 9-octet header followed by a variable-length payload. + +
    + +
    + + The fields of the frame header are defined as: + + + + The length of the frame payload expressed as an unsigned 24-bit integer. Values + greater than 214 (16,384) MUST NOT be sent unless the receiver has + set a larger value for SETTINGS_MAX_FRAME_SIZE. + + + The 9 octets of the frame header are not included in this value. + + + + + The 8-bit type of the frame. The frame type determines the format and semantics of + the frame. Implementations MUST ignore and discard any frame that has a type that + is unknown. + + + + + An 8-bit field reserved for frame-type specific boolean flags. + + + Flags are assigned semantics specific to the indicated frame type. Flags that have + no defined semantics for a particular frame type MUST be ignored, and MUST be left + unset (0) when sending. + + + + + A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST + remain unset (0) when sending and MUST be ignored when receiving. + + + + + A 31-bit stream identifier (see ). The value 0 is + reserved for frames that are associated with the connection as a whole as opposed to + an individual stream. + + + + + + The structure and content of the frame payload is dependent entirely on the frame type. + +
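To make the layout concrete, here is a small Go sketch that decodes the fixed 9-octet header described above (the type and function are illustrative, not taken from any library):

```go
package framing

import "encoding/binary"

// frameHeader mirrors the on-the-wire header: a 24-bit length, an 8-bit type,
// an 8-bit flags field, one reserved bit, and a 31-bit stream identifier.
type frameHeader struct {
	Length   uint32 // payload length only; the 9 header octets are not counted
	Type     uint8
	Flags    uint8
	StreamID uint32 // 0 means the frame applies to the connection as a whole
}

// parseFrameHeader decodes the 9 octets that precede every frame payload.
func parseFrameHeader(buf [9]byte) frameHeader {
	return frameHeader{
		Length:   uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2]),
		Type:     buf[3],
		Flags:    buf[4],
		StreamID: binary.BigEndian.Uint32(buf[5:9]) & 0x7fffffff, // drop the reserved bit
	}
}
```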
    + +
    + + The size of a frame payload is limited by the maximum size that a receiver advertises in + the SETTINGS_MAX_FRAME_SIZE setting. This setting can have any value + between 214 (16,384) and 224-1 (16,777,215) octets, + inclusive. + + + All implementations MUST be capable of receiving and minimally processing frames up to + 214 octets in length, plus the 9 octet frame + header. The size of the frame header is not included when describing frame sizes. + + + Certain frame types, such as PING, impose additional limits + on the amount of payload data allowed. + + + + + If a frame size exceeds any defined limit, or is too small to contain mandatory frame + data, the endpoint MUST send a FRAME_SIZE_ERROR error. A frame size error + in a frame that could alter the state of the entire connection MUST be treated as a connection error; this includes any frame carrying + a header block (that is, HEADERS, + PUSH_PROMISE, and CONTINUATION), SETTINGS, + and any WINDOW_UPDATE frame with a stream identifier of 0. + + + Endpoints are not obligated to use all available space in a frame. Responsiveness can be + improved by using frames that are smaller than the permitted maximum size. Sending large + frames can result in delays in sending time-sensitive frames (such + RST_STREAM, WINDOW_UPDATE, or PRIORITY) + which if blocked by the transmission of a large frame, could affect performance. + +
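A sketch of the size checks the paragraph above implies; the constants follow the stated bounds, and the names are ours:

```go
package framesize

import "fmt"

const (
	minMaxFrameSize = 1 << 14   // 16,384 octets: every implementation must accept at least this
	maxMaxFrameSize = 1<<24 - 1 // 16,777,215 octets: the largest value SETTINGS_MAX_FRAME_SIZE may carry
)

// checkMaxFrameSizeSetting validates an advertised SETTINGS_MAX_FRAME_SIZE value.
func checkMaxFrameSizeSetting(v uint32) error {
	if v < minMaxFrameSize || v > maxMaxFrameSize {
		return fmt.Errorf("SETTINGS_MAX_FRAME_SIZE %d outside [%d, %d]", v, minMaxFrameSize, maxMaxFrameSize)
	}
	return nil
}

// checkFrameLength treats a frame whose declared payload exceeds the advertised
// maximum as a FRAME_SIZE_ERROR condition.
func checkFrameLength(length, advertisedMax uint32) error {
	if length > advertisedMax {
		return fmt.Errorf("frame of %d octets exceeds the advertised maximum of %d", length, advertisedMax)
	}
	return nil
}
```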
    + +
    + + Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values. + They are used within HTTP request and response messages as well as server push operations + (see ). + + + Header lists are collections of zero or more header fields. When transmitted over a + connection, a header list is serialized into a header block using HTTP Header Compression. The serialized header block is then + divided into one or more octet sequences, called header block fragments, and transmitted + within the payload of HEADERS, PUSH_PROMISE or CONTINUATION frames. + + + The Cookie header field is treated specially by the HTTP + mapping (see ). + + + A receiving endpoint reassembles the header block by concatenating its fragments, then + decompresses the block to reconstruct the header list. + + + A complete header block consists of either: + + + a single HEADERS or PUSH_PROMISE frame, + with the END_HEADERS flag set, or + + + a HEADERS or PUSH_PROMISE frame with the END_HEADERS + flag cleared and one or more CONTINUATION frames, + where the last CONTINUATION frame has the END_HEADERS flag set. + + + + + Header compression is stateful. One compression context and one decompression context is + used for the entire connection. Each header block is processed as a discrete unit. + Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved + frames of any other type or from any other stream. The last frame in a sequence of + HEADERS or CONTINUATION frames MUST have the END_HEADERS + flag set. The last frame in a sequence of PUSH_PROMISE or + CONTINUATION frames MUST have the END_HEADERS flag set. This allows a + header block to be logically equivalent to a single frame. + + + Header block fragments can only be sent as the payload of HEADERS, + PUSH_PROMISE or CONTINUATION frames, because these frames + carry data that can modify the compression context maintained by a receiver. An endpoint + receiving HEADERS, PUSH_PROMISE or + CONTINUATION frames MUST reassemble header blocks and perform decompression + even if the frames are to be discarded. A receiver MUST terminate the connection with a + connection error of type + COMPRESSION_ERROR if it does not decompress a header block. + +
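The compression referred to here is HPACK. A short sketch with the golang.org/x/net/http2/hpack package shows a header list being serialized into a header block and decoded back into fields; the headers chosen are arbitrary:

```go
package headers

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// roundTrip encodes a small header list into a header block and decodes it
// again; this block is what HEADERS/CONTINUATION frame payloads would carry.
func roundTrip() error {
	var block bytes.Buffer
	enc := hpack.NewEncoder(&block)
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: ":scheme", Value: "https"},
		{Name: ":authority", Value: "example.com"},
	} {
		if err := enc.WriteField(hf); err != nil {
			return err
		}
	}

	dec := hpack.NewDecoder(4096, func(hf hpack.HeaderField) {
		fmt.Printf("%s: %s\n", hf.Name, hf.Value)
	})
	if _, err := dec.Write(block.Bytes()); err != nil {
		return err
	}
	return dec.Close()
}
```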
    +
    + +
    + + A "stream" is an independent, bi-directional sequence of frames exchanged between the client + and server within an HTTP/2 connection. Streams have several important characteristics: + + + A single HTTP/2 connection can contain multiple concurrently open streams, with either + endpoint interleaving frames from multiple streams. + + + Streams can be established and used unilaterally or shared by either the client or + server. + + + Streams can be closed by either endpoint. + + + The order in which frames are sent on a stream is significant. Recipients process frames + in the order they are received. In particular, the order of HEADERS, + and DATA frames is semantically significant. + + + Streams are identified by an integer. Stream identifiers are assigned to streams by the + endpoint initiating the stream. + + + + +
    + + The lifecycle of a stream is shown in . + + +
+
+   [Figure: stream lifecycle. The diagram shows the "idle", "reserved (local)",
+    "reserved (remote)", "open", "half closed (local)", "half closed (remote)",
+    and "closed" states, together with the frames and flags, listed below, that
+    drive the transitions between them.]
+
+      H:  HEADERS frame (with implied CONTINUATIONs)
+      PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
+      ES: END_STREAM flag
+      R:  RST_STREAM frame
+
    + + + Note that this diagram shows stream state transitions and the frames and flags that affect + those transitions only. In this regard, CONTINUATION frames do not result + in state transitions; they are effectively part of the HEADERS or + PUSH_PROMISE that they follow. For this purpose, the END_STREAM flag is + processed as a separate event to the frame that bears it; a HEADERS frame + with the END_STREAM flag set can cause two state transitions. + + + Both endpoints have a subjective view of the state of a stream that could be different + when frames are in transit. Endpoints do not coordinate the creation of streams; they are + created unilaterally by either endpoint. The negative consequences of a mismatch in + states are limited to the "closed" state after sending RST_STREAM, where + frames might be received for some time after closing. + + + Streams have the following states: + + + + + + All streams start in the "idle" state. In this state, no frames have been + exchanged. + + + The following transitions are valid from this state: + + + Sending or receiving a HEADERS frame causes the stream to become + "open". The stream identifier is selected as described in . The same HEADERS frame can also + cause a stream to immediately become "half closed". + + + Sending a PUSH_PROMISE frame marks the associated stream for + later use. The stream state for the reserved stream transitions to "reserved + (local)". + + + Receiving a PUSH_PROMISE frame marks the associated stream as + reserved by the remote peer. The state of the stream becomes "reserved + (remote)". + + + + + Receiving any frames other than HEADERS or + PUSH_PROMISE on a stream in this state MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + + + A stream in the "reserved (local)" state is one that has been promised by sending a + PUSH_PROMISE frame. A PUSH_PROMISE frame reserves an + idle stream by associating the stream with an open stream that was initiated by the + remote peer (see ). + + + In this state, only the following transitions are possible: + + + The endpoint can send a HEADERS frame. This causes the stream to + open in a "half closed (remote)" state. + + + Either endpoint can send a RST_STREAM frame to cause the stream + to become "closed". This releases the stream reservation. + + + + + An endpoint MUST NOT send any type of frame other than HEADERS or + RST_STREAM in this state. + + + A PRIORITY frame MAY be received in this state. Receiving any type + of frame other than RST_STREAM or PRIORITY on a stream + in this state MUST be treated as a connection + error of type PROTOCOL_ERROR. + + + + + + + A stream in the "reserved (remote)" state has been reserved by a remote peer. + + + In this state, only the following transitions are possible: + + + Receiving a HEADERS frame causes the stream to transition to + "half closed (local)". + + + Either endpoint can send a RST_STREAM frame to cause the stream + to become "closed". This releases the stream reservation. + + + + + An endpoint MAY send a PRIORITY frame in this state to reprioritize + the reserved stream. An endpoint MUST NOT send any type of frame other than + RST_STREAM, WINDOW_UPDATE, or PRIORITY + in this state. + + + Receiving any type of frame other than HEADERS or + RST_STREAM on a stream in this state MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + + + A stream in the "open" state may be used by both peers to send frames of any type. 
+ In this state, sending peers observe advertised stream + level flow control limits. + + + From this state either endpoint can send a frame with an END_STREAM flag set, which + causes the stream to transition into one of the "half closed" states: an endpoint + sending an END_STREAM flag causes the stream state to become "half closed (local)"; + an endpoint receiving an END_STREAM flag causes the stream state to become "half + closed (remote)". + + + Either endpoint can send a RST_STREAM frame from this state, causing + it to transition immediately to "closed". + + + + + + + A stream that is in the "half closed (local)" state cannot be used for sending + frames. Only WINDOW_UPDATE, PRIORITY and + RST_STREAM frames can be sent in this state. + + + A stream transitions from this state to "closed" when a frame that contains an + END_STREAM flag is received, or when either peer sends a RST_STREAM + frame. + + + A receiver can ignore WINDOW_UPDATE frames in this state, which might + arrive for a short period after a frame bearing the END_STREAM flag is sent. + + + PRIORITY frames received in this state are used to reprioritize + streams that depend on the current stream. + + + + + + + A stream that is "half closed (remote)" is no longer being used by the peer to send + frames. In this state, an endpoint is no longer obligated to maintain a receiver + flow control window if it performs flow control. + + + If an endpoint receives additional frames for a stream that is in this state, other + than WINDOW_UPDATE, PRIORITY or + RST_STREAM, it MUST respond with a stream error of type + STREAM_CLOSED. + + + A stream that is "half closed (remote)" can be used by the endpoint to send frames + of any type. In this state, the endpoint continues to observe advertised stream level flow control limits. + + + A stream can transition from this state to "closed" by sending a frame that contains + an END_STREAM flag, or when either peer sends a RST_STREAM frame. + + + + + + + The "closed" state is the terminal state. + + + An endpoint MUST NOT send frames other than PRIORITY on a closed + stream. An endpoint that receives any frame other than PRIORITY + after receiving a RST_STREAM MUST treat that as a stream error of type + STREAM_CLOSED. Similarly, an endpoint that receives any frames after + receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type + STREAM_CLOSED, unless the frame is permitted as described below. + + + WINDOW_UPDATE or RST_STREAM frames can be received in + this state for a short period after a DATA or HEADERS + frame containing an END_STREAM flag is sent. Until the remote peer receives and + processes RST_STREAM or the frame bearing the END_STREAM flag, it + might send frames of these types. Endpoints MUST ignore + WINDOW_UPDATE or RST_STREAM frames received in this + state, though endpoints MAY choose to treat frames that arrive a significant time + after sending END_STREAM as a connection + error of type PROTOCOL_ERROR. + + + PRIORITY frames can be sent on closed streams to prioritize streams + that are dependent on the closed stream. Endpoints SHOULD process + PRIORITY frame, though they can be ignored if the stream has been + removed from the dependency tree (see ). + + + If this state is reached as a result of sending a RST_STREAM frame, + the peer that receives the RST_STREAM might have already sent - or + enqueued for sending - frames on the stream that cannot be withdrawn. 
An endpoint + MUST ignore frames that it receives on closed streams after it has sent a + RST_STREAM frame. An endpoint MAY choose to limit the period over + which it ignores frames and treat frames that arrive after this time as being in + error. + + + Flow controlled frames (i.e., DATA) received after sending + RST_STREAM are counted toward the connection flow control window. + Even though these frames might be ignored, because they are sent before the sender + receives the RST_STREAM, the sender will consider the frames to count + against the flow control window. + + + An endpoint might receive a PUSH_PROMISE frame after it sends + RST_STREAM. PUSH_PROMISE causes a stream to become + "reserved" even if the associated stream has been reset. Therefore, a + RST_STREAM is needed to close an unwanted promised stream. + + + + + + In the absence of more specific guidance elsewhere in this document, implementations + SHOULD treat the receipt of a frame that is not expressly permitted in the description of + a state as a connection error of type + PROTOCOL_ERROR. Frame of unknown types are ignored. + + + An example of the state transitions for an HTTP request/response exchange can be found in + . An example of the state transitions for server push can be + found in and . + + +
    + + Streams are identified with an unsigned 31-bit integer. Streams initiated by a client + MUST use odd-numbered stream identifiers; those initiated by the server MUST use + even-numbered stream identifiers. A stream identifier of zero (0x0) is used for + connection control messages; the stream identifier zero cannot be used to establish a + new stream. + + + HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are + responded to with a stream identifier of one (0x1). After the upgrade + completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1 + cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1. + + + The identifier of a newly established stream MUST be numerically greater than all + streams that the initiating endpoint has opened or reserved. This governs streams that + are opened using a HEADERS frame and streams that are reserved using + PUSH_PROMISE. An endpoint that receives an unexpected stream identifier + MUST respond with a connection error of + type PROTOCOL_ERROR. + + + The first use of a new stream identifier implicitly closes all streams in the "idle" + state that might have been initiated by that peer with a lower-valued stream identifier. + For example, if a client sends a HEADERS frame on stream 7 without ever + sending a frame on stream 5, then stream 5 transitions to the "closed" state when the + first frame for stream 7 is sent or received. + + + Stream identifiers cannot be reused. Long-lived connections can result in an endpoint + exhausting the available range of stream identifiers. A client that is unable to + establish a new stream identifier can establish a new connection for new streams. A + server that is unable to establish a new stream identifier can send a + GOAWAY frame so that the client is forced to open a new connection for + new streams. + +
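A toy allocator following the rules above: odd identifiers for client-initiated streams, strictly increasing, and a new connection once the 31-bit space is exhausted (the type and names are ours):

```go
package streams

import "errors"

var errStreamIDsExhausted = errors.New("stream identifiers exhausted; open a new connection")

// idAllocator hands out identifiers for locally initiated streams.
type idAllocator struct {
	next uint32
}

// newClientIDs starts at 1; a server-side allocator would start at 2.
func newClientIDs() *idAllocator { return &idAllocator{next: 1} }

func (a *idAllocator) nextID() (uint32, error) {
	if a.next > 0x7fffffff {
		return 0, errStreamIDsExhausted
	}
	id := a.next
	a.next += 2 // keep the parity and keep identifiers strictly increasing
	return id, nil
}
```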
    + +
    + + A peer can limit the number of concurrently active streams using the + SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent + streams setting is specific to each endpoint and applies only to the peer that receives + the setting. That is, clients specify the maximum number of concurrent streams the + server can initiate, and servers specify the maximum number of concurrent streams the + client can initiate. + + + Streams that are in the "open" state, or either of the "half closed" states count toward + the maximum number of streams that an endpoint is permitted to open. Streams in any of + these three states count toward the limit advertised in the + SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the + "reserved" states do not count toward the stream limit. + + + Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a + HEADERS frame that causes their advertised concurrent stream limit to be + exceeded MUST treat this as a stream error. An + endpoint that wishes to reduce the value of + SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current + number of open streams can either close streams that exceed the new value or allow + streams to complete. + +
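As a small illustration with the golang.org/x/net/http2 Framer used elsewhere in this diff, advertising the limit might look like the following sketch (the helper name and the suggestion of 100 are ours):

```go
package concurrency

import "golang.org/x/net/http2"

// advertiseStreamLimit tells the peer how many streams it may have open toward us.
func advertiseStreamLimit(fr *http2.Framer, max uint32) error {
	return fr.WriteSettings(http2.Setting{
		ID:  http2.SettingMaxConcurrentStreams,
		Val: max, // e.g. 100; applies only to streams the peer initiates
	})
}
```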
    +
    + +
    + + Using streams for multiplexing introduces contention over use of the TCP connection, + resulting in blocked streams. A flow control scheme ensures that streams on the same + connection do not destructively interfere with each other. Flow control is used for both + individual streams and for the connection as a whole. + + + HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame. + + +
    + + HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be + used without requiring protocol changes. Flow control in HTTP/2 has the following + characteristics: + + + Flow control is specific to a connection; i.e., it is "hop-by-hop", not + "end-to-end". + + + Flow control is based on window update frames. Receivers advertise how many octets + they are prepared to receive on a stream and for the entire connection. This is a + credit-based scheme. + + + Flow control is directional with overall control provided by the receiver. A + receiver MAY choose to set any window size that it desires for each stream and for + the entire connection. A sender MUST respect flow control limits imposed by a + receiver. Clients, servers and intermediaries all independently advertise their + flow control window as a receiver and abide by the flow control limits set by + their peer when sending. + + + The initial value for the flow control window is 65,535 octets for both new streams + and the overall connection. + + + The frame type determines whether flow control applies to a frame. Of the frames + specified in this document, only DATA frames are subject to flow + control; all other frame types do not consume space in the advertised flow control + window. This ensures that important control frames are not blocked by flow control. + + + Flow control cannot be disabled. + + + HTTP/2 defines only the format and semantics of the WINDOW_UPDATE + frame (). This document does not stipulate how a + receiver decides when to send this frame or the value that it sends, nor does it + specify how a sender chooses to send packets. Implementations are able to select + any algorithm that suits their needs. + + + + + Implementations are also responsible for managing how requests and responses are sent + based on priority; choosing how to avoid head of line blocking for requests; and + managing the creation of new streams. Algorithm choices for these could interact with + any flow control algorithm. + +
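A minimal sketch of the sender-side accounting these principles imply, assuming one such window per stream plus one for the connection (the names are ours):

```go
package flowcontrol

import "errors"

var errWindowExceeded = errors.New("DATA would exceed the peer's advertised flow-control window")

// sendWindow tracks how many octets of DATA may still be sent against one
// flow-control window. New streams and the connection both start at 65,535.
type sendWindow struct {
	avail int32
}

func newSendWindow() *sendWindow { return &sendWindow{avail: 65535} }

// consume reserves n octets before a DATA frame is written.
func (w *sendWindow) consume(n int32) error {
	if n > w.avail {
		return errWindowExceeded
	}
	w.avail -= n
	return nil
}

// replenish applies a WINDOW_UPDATE increment received from the peer.
func (w *sendWindow) replenish(increment int32) {
	w.avail += increment
}
```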
    + +
    + + Flow control is defined to protect endpoints that are operating under resource + constraints. For example, a proxy needs to share memory between many connections, and + also might have a slow upstream connection and a fast downstream one. Flow control + addresses cases where the receiver is unable process data on one stream, yet wants to + continue to process other streams in the same connection. + + + Deployments that do not require this capability can advertise a flow control window of + the maximum size, incrementing the available space when new data is received. This + effectively disables flow control for that receiver. Conversely, a sender is always + subject to the flow control window advertised by the receiver. + + + Deployments with constrained resources (for example, memory) can employ flow control to + limit the amount of memory a peer can consume. Note, however, that this can lead to + suboptimal use of available network resources if flow control is enabled without + knowledge of the bandwidth-delay product (see ). + + + Even with full awareness of the current bandwidth-delay product, implementation of flow + control can be difficult. When using flow control, the receiver MUST read from the TCP + receive buffer in a timely fashion. Failure to do so could lead to a deadlock when + critical frames, such as WINDOW_UPDATE, are not read and acted upon. + +
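The strategy of advertising a large window and immediately returning whatever is consumed can be sketched with the Framer used elsewhere in this diff; the helper name is ours, and DATA padding is ignored here for brevity:

```go
package windowreturn

import "golang.org/x/net/http2"

// returnFlowControl hands the octets consumed by a DATA frame straight back to
// the sender, on both the connection window (stream 0) and the stream's window.
func returnFlowControl(fr *http2.Framer, df *http2.DataFrame) error {
	n := uint32(len(df.Data()))
	if n == 0 {
		return nil
	}
	if err := fr.WriteWindowUpdate(0, n); err != nil {
		return err
	}
	return fr.WriteWindowUpdate(df.StreamID, n)
}
```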
    +
    + +
    + + A client can assign a priority for a new stream by including prioritization information in + the HEADERS frame that opens the stream. For an existing + stream, the PRIORITY frame can be used to change the + priority. + + + The purpose of prioritization is to allow an endpoint to express how it would prefer its + peer allocate resources when managing concurrent streams. Most importantly, priority can + be used to select streams for transmitting frames when there is limited capacity for + sending. + + + Streams can be prioritized by marking them as dependent on the completion of other streams + (). Each dependency is assigned a relative weight, a number + that is used to determine the relative proportion of available resources that are assigned + to streams dependent on the same stream. + + + + Explicitly setting the priority for a stream is input to a prioritization process. It + does not guarantee any particular processing or transmission order for the stream relative + to any other stream. An endpoint cannot force a peer to process concurrent streams in a + particular order using priority. Expressing priority is therefore only ever a suggestion. + + + Providing prioritization information is optional, so default values are used if no + explicit indicator is provided (). + + +
    + + Each stream can be given an explicit dependency on another stream. Including a + dependency expresses a preference to allocate resources to the identified stream rather + than to the dependent stream. + + + A stream that is not dependent on any other stream is given a stream dependency of 0x0. + In other words, the non-existent stream 0 forms the root of the tree. + + + A stream that depends on another stream is a dependent stream. The stream upon which a + stream is dependent is a parent stream. A dependency on a stream that is not currently + in the tree - such as a stream in the "idle" state - results in that stream being given + a default priority. + + + When assigning a dependency on another stream, the stream is added as a new dependency + of the parent stream. Dependent streams that share the same parent are not ordered with + respect to each other. For example, if streams B and C are dependent on stream A, and + if stream D is created with a dependency on stream A, this results in a dependency order + of A followed by B, C, and D in any order. + +
+    A                 A
+   / \      ==>      /|\
+  B   C             B D C
+
    + + An exclusive flag allows for the insertion of a new level of dependencies. The + exclusive flag causes the stream to become the sole dependency of its parent stream, + causing other dependencies to become dependent on the exclusive stream. In the + previous example, if stream D is created with an exclusive dependency on stream A, this + results in D becoming the dependency parent of B and C. + +
+                      A
+    A                 |
+   / \      ==>       D
+  B   C              / \
+                     B   C
+
    + + Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all + of the streams that it depends on (the chain of parent streams up to 0x0) are either + closed, or it is not possible to make progress on them. + + + A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR. + +
    + +
    + + All dependent streams are allocated an integer weight between 1 and 256 (inclusive). + + + Streams with the same parent SHOULD be allocated resources proportionally based on their + weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A + with weight 12, and if no progress can be made on A, stream B ideally receives one third + of the resources allocated to stream C. + +
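A tiny sketch of that proportional rule (the function is ours):

```go
package priority

// shares returns the ideal fraction of the parent's resources for each directly
// dependent stream, given its weight in the range 1..256.
func shares(weights map[uint32]int) map[uint32]float64 {
	total := 0
	for _, w := range weights {
		total += w
	}
	out := make(map[uint32]float64, len(weights))
	for id, w := range weights {
		out[id] = float64(w) / float64(total)
	}
	return out
}

// Using the example above: with B at weight 4 and C at weight 12, B is assigned
// 4/16 of the parent's resources and C 12/16, so B receives one third as much as C.
```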
    + +
    + + Stream priorities are changed using the PRIORITY frame. Setting a + dependency causes a stream to become dependent on the identified parent stream. + + + Dependent streams move with their parent stream if the parent is reprioritized. Setting + a dependency with the exclusive flag for a reprioritized stream moves all the + dependencies of the new parent stream to become dependent on the reprioritized stream. + + + If a stream is made dependent on one of its own dependencies, the formerly dependent + stream is first moved to be dependent on the reprioritized stream's previous parent. + The moved dependency retains its weight. + +
+
+        For example, consider an original dependency tree where B and C depend on A, D and E
+        depend on C, and F depends on D.  If A is made dependent on D, then D takes the place
+        of A.  All other dependency relationships stay the same, except for F, which becomes
+        dependent on A if the reprioritization is exclusive.
+
+    x                x                x                 x
+    |               / \               |                 |
+    A              D   A              D                 D
+   / \            /   / \            / \                |
+  B   C     ==>  F   B   C   ==>    F   A     OR        A
+     / \                 |             / \             /|\
+    D   E                E            B   C           B C F
+    |                                     |               |
+    F                                     E               E
+               (intermediate)   (non-exclusive)    (exclusive)
+
    +
    + +
    + + When a stream is removed from the dependency tree, its dependencies can be moved to + become dependent on the parent of the closed stream. The weights of new dependencies + are recalculated by distributing the weight of the dependency of the closed stream + proportionally based on the weights of its dependencies. + + + Streams that are removed from the dependency tree cause some prioritization information + to be lost. Resources are shared between streams with the same parent stream, which + means that if a stream in that set closes or becomes blocked, any spare capacity + allocated to a stream is distributed to the immediate neighbors of the stream. However, + if the common dependency is removed from the tree, those streams share resources with + streams at the next highest level. + + + For example, assume streams A and B share a parent, and streams C and D both depend on + stream A. Prior to the removal of stream A, if streams A and D are unable to proceed, + then stream C receives all the resources dedicated to stream A. If stream A is removed + from the tree, the weight of stream A is divided between streams C and D. If stream D + is still unable to proceed, this results in stream C receiving a reduced proportion of + resources. For equal starting weights, C receives one third, rather than one half, of + available resources. + + + It is possible for a stream to become closed while prioritization information that + creates a dependency on that stream is in transit. If a stream identified in a + dependency has no associated priority information, then the dependent stream is instead + assigned a default priority. This potentially creates + suboptimal prioritization, since the stream could be given a priority that is different + to what is intended. + + + To avoid these problems, an endpoint SHOULD retain stream prioritization state for a + period after streams become closed. The longer state is retained, the lower the chance + that streams are assigned incorrect or default priority values. + + + This could create a large state burden for an endpoint, so this state MAY be limited. + An endpoint MAY apply a fixed upper limit on the number of closed streams for which + prioritization state is tracked to limit state exposure. The amount of additional state + an endpoint maintains could be dependent on load; under high load, prioritization state + can be discarded to limit resource commitments. In extreme cases, an endpoint could + even discard prioritization state for active or reserved streams. If a fixed limit is + applied, endpoints SHOULD maintain state for at least as many streams as allowed by + their setting for SETTINGS_MAX_CONCURRENT_STREAMS. + + + An endpoint receiving a PRIORITY frame that changes the priority of a + closed stream SHOULD alter the dependencies of the streams that depend on it, if it has + retained enough state to do so. + +
    + +
    + + Providing priority information is optional. Streams are assigned a non-exclusive + dependency on stream 0x0 by default. Pushed streams + initially depend on their associated stream. In both cases, streams are assigned a + default weight of 16. + +
    +
    + +
    + + HTTP/2 framing permits two classes of error: + + + An error condition that renders the entire connection unusable is a connection error. + + + An error in an individual stream is a stream error. + + + + + A list of error codes is included in . + + +
    + + A connection error is any error which prevents further processing of the framing layer, + or which corrupts any connection state. + + + An endpoint that encounters a connection error SHOULD first send a GOAWAY + frame () with the stream identifier of the last stream that it + successfully received from its peer. The GOAWAY frame includes an error + code that indicates why the connection is terminating. After sending the + GOAWAY frame, the endpoint MUST close the TCP connection. + + + It is possible that the GOAWAY will not be reliably received by the + receiving endpoint (see ). In the event of a connection error, + GOAWAY only provides a best effort attempt to communicate with the peer + about why the connection is being terminated. + + + An endpoint can end a connection at any time. In particular, an endpoint MAY choose to + treat a stream error as a connection error. Endpoints SHOULD send a + GOAWAY frame when ending a connection, providing that circumstances + permit it. + +
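+
+ A non-normative sketch of this procedure in Go, using the Framer from the
+ golang.org/x/net/http2 package; the function name and the caller-supplied conn, fr,
+ lastStreamID and code are assumptions of the example, not requirements of this
+ document.
+
+   import (
+       "net"
+
+       "golang.org/x/net/http2"
+   )
+
+   // signalConnectionError sends a best-effort GOAWAY and then closes the
+   // TCP connection, as required for connection errors.
+   func signalConnectionError(conn net.Conn, fr *http2.Framer, lastStreamID uint32, code http2.ErrCode) {
+       // The GOAWAY carries the last stream successfully received from the
+       // peer and the error code explaining why the connection is ending.
+       _ = fr.WriteGoAway(lastStreamID, code, nil)
+       // The GOAWAY might never arrive; closing the connection is mandatory.
+       _ = conn.Close()
+   }
+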
    + +
    + + A stream error is an error related to a specific stream that does not affect processing + of other streams. + + + An endpoint that detects a stream error sends a RST_STREAM frame () that contains the stream identifier of the stream where the error + occurred. The RST_STREAM frame includes an error code that indicates the + type of error. + + + A RST_STREAM is the last frame that an endpoint can send on a stream. + The peer that sends the RST_STREAM frame MUST be prepared to receive any + frames that were sent or enqueued for sending by the remote peer. These frames can be + ignored, except where they modify connection state (such as the state maintained for + header compression, or flow control). + + + Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for + any stream. However, an endpoint MAY send additional RST_STREAM frames if + it receives frames on a closed stream after more than a round-trip time. This behavior + is permitted to deal with misbehaving implementations. + + + An endpoint MUST NOT send a RST_STREAM in response to an + RST_STREAM frame, to avoid looping. + +
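+
+ Continuing the same non-normative Go sketch, a stream error leaves the connection
+ intact and only resets the affected stream (resetStream and its parameters are
+ illustrative):
+
+   import "golang.org/x/net/http2"
+
+   // resetStream reports a stream error without affecting other streams.
+   // After this call the sender must still be prepared to read frames the
+   // peer had already sent or enqueued for this stream.
+   func resetStream(fr *http2.Framer, streamID uint32, code http2.ErrCode) error {
+       return fr.WriteRSTStream(streamID, code)
+   }
+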
    + +
    + + If the TCP connection is closed or reset while streams remain in open or half closed + states, then the endpoint MUST assume that those streams were abnormally interrupted and + could be incomplete. + +
    +
    + +
    + + HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide + additional services or alter any aspect of the protocol, within the limitations described + in this section. Extensions are effective only within the scope of a single HTTP/2 + connection. + + + Extensions are permitted to use new frame types, new + settings, or new error + codes. Registries are established for managing these extension points: frame types, settings and + error codes. + + + Implementations MUST ignore unknown or unsupported values in all extensible protocol + elements. Implementations MUST discard frames that have unknown or unsupported types. + This means that any of these extension points can be safely used by extensions without + prior arrangement or negotiation. However, extension frames that appear in the middle of + a header block are not permitted; these MUST be treated + as a connection error of type + PROTOCOL_ERROR. + + + However, extensions that could change the semantics of existing protocol components MUST + be negotiated before being used. For example, an extension that changes the layout of the + HEADERS frame cannot be used until the peer has given a positive signal + that this is acceptable. In this case, it could also be necessary to coordinate when the + revised layout comes into effect. Note that treating any frame other than + DATA frames as flow controlled is such a change in semantics, and can only + be done through negotiation. + + + This document doesn't mandate a specific method for negotiating the use of an extension, + but notes that a setting could be used for that + purpose. If both peers set a value that indicates willingness to use the extension, then + the extension can be used. If a setting is used for extension negotiation, the initial + value MUST be defined so that the extension is initially disabled. + +
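+
+ As a non-normative illustration of the rule that unknown frame types are ignored, the
+ Framer in golang.org/x/net/http2 represents frames it does not recognize as
+ *http2.UnknownFrame, which a reader can simply skip; readLoop and rw are assumptions
+ of the example.
+
+   import (
+       "io"
+
+       "golang.org/x/net/http2"
+   )
+
+   // readLoop discards frames of unknown or unsupported types, as required.
+   func readLoop(rw io.ReadWriter) error {
+       fr := http2.NewFramer(rw, rw)
+       for {
+           f, err := fr.ReadFrame()
+           if err != nil {
+               return err
+           }
+           switch f.(type) {
+           case *http2.UnknownFrame:
+               // Extension frame: safe to ignore without prior negotiation.
+           case *http2.DataFrame:
+               // ... handle the frame types defined by this document ...
+           }
+       }
+   }
+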
    +
    + +
+
+
+ This specification defines a number of frame types, each identified by a unique 8-bit type
+ code. Each frame type serves a distinct purpose either in the establishment and management
+ of the connection as a whole, or of individual streams.
+
+
+ The transmission of specific frame types can alter the state of a connection. If endpoints
+ fail to maintain a synchronized view of the connection state, successful communication
+ within the connection will no longer be possible. Therefore, it is important that endpoints
+ have a shared comprehension of how the state is affected by the use of any given frame.
+
+
    + + DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated + with a stream. One or more DATA frames are used, for instance, to carry HTTP request or + response payloads. + + + DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to + obscure the size of messages. + +
    + +
    + + The DATA frame contains the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is optional and is only present if the PADDED flag is set. + + + Application data. The amount of data is the remainder of the frame payload after + subtracting the length of the other fields that are present. + + + Padding octets that contain no application semantic value. Padding octets MUST be set + to zero when sending and ignored when receiving. + + + + + + The DATA frame defines the following flags: + + + Bit 1 being set indicates that this frame is the last that the endpoint will send for + the identified stream. Setting this flag causes the stream to enter one of the "half closed" states or the "closed" state. + + + Bit 4 being set indicates that the Pad Length field and any padding that it describes + is present. + + + + + DATA frames MUST be associated with a stream. If a DATA frame is received whose stream + identifier field is 0x0, the recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + DATA frames are subject to flow control and can only be sent when a stream is in the + "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow + control, including Pad Length and Padding fields if present. If a DATA frame is received + whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond + with a stream error of type + STREAM_CLOSED. + + + The total number of padding octets is determined by the value of the Pad Length field. If + the length of the padding is greater than the length of the frame payload, the recipient + MUST treat this as a connection error of + type PROTOCOL_ERROR. + + + A frame can be increased in size by one octet by including a Pad Length field with a + value of zero. + + + + + Padding is a security feature; see . + +
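+
+ A non-normative Go sketch of emitting DATA frames with and without padding via
+ golang.org/x/net/http2; fr, streamID and the body are assumed to be supplied by
+ surrounding code, and the 16-octet pad length is an arbitrary example.
+
+   import "golang.org/x/net/http2"
+
+   // sendBody transmits the message body as a single DATA frame that also
+   // ends the stream.
+   func sendBody(fr *http2.Framer, streamID uint32, body []byte) error {
+       return fr.WriteData(streamID, true, body)
+   }
+
+   // sendPaddedBody does the same, but appends 16 octets of zero padding to
+   // obscure the payload size; padding is also counted by flow control.
+   func sendPaddedBody(fr *http2.Framer, streamID uint32, body []byte) error {
+       return fr.WriteDataPadded(streamID, true, body, make([]byte, 16))
+   }
+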
    + +
    + + The HEADERS frame (type=0x1) is used to open a stream, + and additionally carries a header block fragment. HEADERS frames can be sent on a stream + in the "open" or "half closed (remote)" states. + +
    + +
    + + The HEADERS frame payload has the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is only present if the PADDED flag is set. + + + A single bit flag indicates that the stream dependency is exclusive, see . This field is only present if the PRIORITY flag is set. + + + A 31-bit stream identifier for the stream that this stream depends on, see . This field is only present if the PRIORITY flag is set. + + + An 8-bit weight for the stream, see . Add one to the + value to obtain a weight between 1 and 256. This field is only present if the + PRIORITY flag is set. + + + A header block fragment. + + + Padding octets that contain no application semantic value. Padding octets MUST be set + to zero when sending and ignored when receiving. + + + + + + The HEADERS frame defines the following flags: + + + + Bit 1 being set indicates that the header block is + the last that the endpoint will send for the identified stream. Setting this flag + causes the stream to enter one of "half closed" + states. + + + A HEADERS frame carries the END_STREAM flag that signals the end of a stream. + However, a HEADERS frame with the END_STREAM flag set can be followed by + CONTINUATION frames on the same stream. Logically, the + CONTINUATION frames are part of the HEADERS frame. + + + + + Bit 3 being set indicates that this frame contains an entire header block and is not followed by any + CONTINUATION frames. + + + A HEADERS frame without the END_HEADERS flag set MUST be followed by a + CONTINUATION frame for the same stream. A receiver MUST treat the + receipt of any other type of frame or a frame on a different stream as a connection error of type + PROTOCOL_ERROR. + + + + + Bit 4 being set indicates that the Pad Length field and any padding that it + describes is present. + + + + + Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight + fields are present; see . + + + + + + + The payload of a HEADERS frame contains a header block + fragment. A header block that does not fit within a HEADERS frame is continued in + a CONTINUATION frame. + + + + HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose + stream identifier field is 0x0, the recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + + The HEADERS frame changes the connection state as described in . + + + + The HEADERS frame includes optional padding. Padding fields and flags are identical to + those defined for DATA frames. + + + Prioritization information in a HEADERS frame is logically equivalent to a separate + PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in + stream prioritization when new streams are created. Priorization fields in HEADERS frames + subsequent to the first on a stream reprioritize the + stream. + +
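+
+ A non-normative Go sketch that builds a header block with golang.org/x/net/http2/hpack
+ and sends it as a single HEADERS frame carrying optional priority information; fr and
+ streamID are assumed, and the header fields are only an example.
+
+   import (
+       "bytes"
+
+       "golang.org/x/net/http2"
+       "golang.org/x/net/http2/hpack"
+   )
+
+   func sendRequestHeaders(fr *http2.Framer, streamID uint32) error {
+       var block bytes.Buffer
+       enc := hpack.NewEncoder(&block)
+       for _, f := range []hpack.HeaderField{
+           {Name: ":method", Value: "GET"},
+           {Name: ":scheme", Value: "https"},
+           {Name: ":authority", Value: "example.org"},
+           {Name: ":path", Value: "/resource"},
+           {Name: "accept", Value: "image/jpeg"},
+       } {
+           if err := enc.WriteField(f); err != nil {
+               return err
+           }
+       }
+       return fr.WriteHeaders(http2.HeadersFrameParam{
+           StreamID:      streamID,
+           BlockFragment: block.Bytes(),
+           EndHeaders:    true, // the entire header block fits in this frame
+           EndStream:     true, // no request body follows
+           Priority: http2.PriorityParam{ // causes the PRIORITY flag to be set
+               StreamDep: 0,
+               Exclusive: false,
+               Weight:    15, // wire value 15 encodes an effective weight of 16
+           },
+       })
+   }
+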
    + +
    + + The PRIORITY frame (type=0x2) specifies the sender-advised + priority of a stream. It can be sent at any time for an existing stream, including + closed streams. This enables reprioritization of existing streams. + +
    + +
    + + The payload of a PRIORITY frame contains the following fields: + + + A single bit flag indicates that the stream dependency is exclusive, see . + + + A 31-bit stream identifier for the stream that this stream depends on, see . + + + An 8-bit weight for the identified stream dependency, see . Add one to the value to obtain a weight between 1 and 256. + + + + + + The PRIORITY frame does not define any flags. + + + + The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received + with a stream identifier of 0x0, the recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open", + "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be + sent between consecutive frames that comprise a single header + block. Note that this frame could arrive after processing or frame sending has + completed, which would cause it to have no effect on the current stream. For a stream + that is in the "half closed (remote)" or "closed" - state, this frame can only affect + processing of the current stream and not frame transmission. + + + The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state. + This allows for the reprioritization of a group of dependent streams by altering the + priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a + closed stream risks being ignored due to the peer having discarded priority state + information for that stream. + +
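+
+ A non-normative Go sketch of sending a PRIORITY frame with golang.org/x/net/http2;
+ fr is assumed to be a Framer bound to an established connection, and the stream
+ numbers are arbitrary examples.
+
+   import "golang.org/x/net/http2"
+
+   // Reprioritize stream 5: it now depends exclusively on stream 3, with an
+   // effective weight of 256 (the wire value is one less than the weight).
+   func reprioritizeStream(fr *http2.Framer) error {
+       return fr.WritePriority(5, http2.PriorityParam{
+           StreamDep: 3,
+           Exclusive: true,
+           Weight:    255,
+       })
+   }
+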
    + +
    + + The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by + the initiator of a stream, it indicates that they wish to cancel the stream or that an + error condition has occurred. When sent by the receiver of a stream, it indicates that + either the receiver is rejecting the stream, requesting that the stream be cancelled, or + that an error condition has occurred. + +
    + +
    + + + The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code. The error code indicates why the stream is being + terminated. + + + + The RST_STREAM frame does not define any flags. + + + + The RST_STREAM frame fully terminates the referenced stream and causes it to enter the + closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send + additional frames for that stream, with the exception of PRIORITY. However, + after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process + additional frames sent on the stream that might have been sent by the peer prior to the + arrival of the RST_STREAM. + + + + RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received + with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type + PROTOCOL_ERROR. + + + + RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM + frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type + PROTOCOL_ERROR. + + +
    + +
    + + The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints + communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is + also used to acknowledge the receipt of those parameters. Individually, a SETTINGS + parameter can also be referred to as a "setting". + + + SETTINGS parameters are not negotiated; they describe characteristics of the sending peer, + which are used by the receiving peer. Different values for the same parameter can be + advertised by each peer. For example, a client might set a high initial flow control + window, whereas a server might set a lower value to conserve resources. + + + + A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be + sent at any other time by either endpoint over the lifetime of the connection. + Implementations MUST support all of the parameters defined by this specification. + + + + Each parameter in a SETTINGS frame replaces any existing value for that parameter. + Parameters are processed in the order in which they appear, and a receiver of a SETTINGS + frame does not need to maintain any state other than the current value of its + parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by + a receiver. + + + SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS + frame defines the following flag: + + + Bit 1 being set indicates that this frame acknowledges receipt and application of the + peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST + be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value + other than 0 MUST be treated as a connection + error of type FRAME_SIZE_ERROR. For more info, see Settings Synchronization. + + + + + SETTINGS frames always apply to a connection, never a single stream. The stream + identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS + frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond + with a connection error of type + PROTOCOL_ERROR. + + + The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame + MUST be treated as a connection error of type + PROTOCOL_ERROR. + + +
    + + The payload of a SETTINGS frame consists of zero or more parameters, each consisting of + an unsigned 16-bit setting identifier and an unsigned 32-bit value. + + +
    + +
    +
    + +
+
+
+ The following parameters are defined:
+
+
+ SETTINGS_HEADER_TABLE_SIZE (0x1):
+ Allows the sender to inform the remote endpoint of the maximum size of the header
+ compression table used to decode header blocks, in octets. The encoder can select
+ any size equal to or less than this value by using signaling specific to the
+ header compression format inside a header block. The initial value is 4,096
+ octets.
+
+
+ SETTINGS_ENABLE_PUSH (0x2):
+ This setting can be used to disable server
+ push. An endpoint MUST NOT send a PUSH_PROMISE frame if it
+ receives this parameter set to a value of 0. An endpoint that has both set this
+ parameter to 0 and had it acknowledged MUST treat the receipt of a
+ PUSH_PROMISE frame as a connection error of type
+ PROTOCOL_ERROR.
+
+
+ The initial value is 1, which indicates that server push is permitted. Any value
+ other than 0 or 1 MUST be treated as a connection error of type
+ PROTOCOL_ERROR.
+
+
+ SETTINGS_MAX_CONCURRENT_STREAMS (0x3):
+ Indicates the maximum number of concurrent streams that the sender will allow.
+ This limit is directional: it applies to the number of streams that the sender
+ permits the receiver to create. Initially there is no limit to this value. It is
+ recommended that this value be no smaller than 100, so as to not unnecessarily
+ limit parallelism.
+
+
+ A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special
+ by endpoints. A zero value does prevent the creation of new streams, however this
+ can also happen for any limit that is exhausted with active streams. Servers
+ SHOULD only set a zero value for short durations; if a server does not wish to
+ accept requests, closing the connection could be preferable.
+
+
+ SETTINGS_INITIAL_WINDOW_SIZE (0x4):
+ Indicates the sender's initial window size (in octets) for stream level flow
+ control. The initial value is 2^16-1 (65,535) octets.
+
+
+ This setting affects the window size of all streams, including existing streams,
+ see .
+
+
+ Values above the maximum flow control window size of 2^31-1 MUST
+ be treated as a connection error of
+ type FLOW_CONTROL_ERROR.
+
+
+ SETTINGS_MAX_FRAME_SIZE (0x5):
+ Indicates the size of the largest frame payload that the sender is willing to
+ receive, in octets.
+
+
+ The initial value is 2^14 (16,384) octets. The value advertised by
+ an endpoint MUST be between this initial value and the maximum allowed frame size
+ (2^24-1 or 16,777,215 octets), inclusive. Values outside this range
+ MUST be treated as a connection error
+ of type PROTOCOL_ERROR.
+
+
+ SETTINGS_MAX_HEADER_LIST_SIZE (0x6):
+ This advisory setting informs a peer of the maximum size of header list that the
+ sender is prepared to accept, in octets. The value is based on the uncompressed
+ size of header fields, including the length of the name and value in octets plus
+ an overhead of 32 octets for each header field.
+
+
+ For any given request, a lower limit than what is advertised MAY be enforced. The
+ initial value of this setting is unlimited.
+
+
+
+
+ An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier
+ MUST ignore that setting.
+
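+
+ A non-normative Go sketch advertising a few of these parameters with
+ golang.org/x/net/http2; the chosen values are arbitrary examples and fr is assumed.
+
+   import "golang.org/x/net/http2"
+
+   func advertiseSettings(fr *http2.Framer) error {
+       return fr.WriteSettings(
+           http2.Setting{ID: http2.SettingHeaderTableSize, Val: 4096},
+           http2.Setting{ID: http2.SettingEnablePush, Val: 0}, // disable server push
+           http2.Setting{ID: http2.SettingMaxConcurrentStreams, Val: 100},
+           http2.Setting{ID: http2.SettingInitialWindowSize, Val: 1 << 20},
+           http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 14},
+       )
+   }
+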
    + +
    + + Most values in SETTINGS benefit from or require an understanding of when the peer has + received and applied the changed parameter values. In order to provide + such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag + is not set MUST apply the updated parameters as soon as possible upon receipt. + + + The values in the SETTINGS frame MUST be processed in the order they appear, with no + other frame processing between values. Unsupported parameters MUST be ignored. Once + all values have been processed, the recipient MUST immediately emit a SETTINGS frame + with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender + of the altered parameters can rely on the setting having been applied. + + + If the sender of a SETTINGS frame does not receive an acknowledgement within a + reasonable amount of time, it MAY issue a connection error of type + SETTINGS_TIMEOUT. + +
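+
+ A non-normative Go sketch of the receiver side of this synchronization, again using
+ golang.org/x/net/http2; apply is a hypothetical local bookkeeping function supplied
+ by the caller.
+
+   import "golang.org/x/net/http2"
+
+   func handleSettings(fr *http2.Framer, f *http2.SettingsFrame, apply func(http2.Setting)) error {
+       if f.IsAck() {
+           return nil // our own earlier SETTINGS has now been applied by the peer
+       }
+       // Apply the parameters in the order they appear ...
+       if err := f.ForeachSetting(func(s http2.Setting) error {
+           apply(s)
+           return nil
+       }); err != nil {
+           return err
+       }
+       // ... then immediately acknowledge them.
+       return fr.WriteSettingsAck()
+   }
+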
    +
    + +
    + + The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of + streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned + 31-bit identifier of the stream the endpoint plans to create along with a set of headers + that provide additional context for the stream. contains a + thorough description of the use of PUSH_PROMISE frames. + + +
    + +
    + + The PUSH_PROMISE frame payload has the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is only present if the PADDED flag is set. + + + A single reserved bit. + + + An unsigned 31-bit integer that identifies the stream that is reserved by the + PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next + stream sent by the sender (see new stream + identifier). + + + A header block fragment containing request header + fields. + + + Padding octets. + + + + + + The PUSH_PROMISE frame defines the following flags: + + + + Bit 3 being set indicates that this frame contains an entire header block and is not followed by any + CONTINUATION frames. + + + A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a + CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any + other type of frame or a frame on a different stream as a connection error of type + PROTOCOL_ERROR. + + + + + Bit 4 being set indicates that the Pad Length field and any padding that it + describes is present. + + + + + + + PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream + identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the + stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + + Promised streams are not required to be used in the order they are promised. The + PUSH_PROMISE only reserves stream identifiers for later use. + + + + PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the + peer endpoint is set to 0. An endpoint that has set this setting and has received + acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type + PROTOCOL_ERROR. + + + Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a + RST_STREAM referencing the promised stream identifier back to the sender of + the PUSH_PROMISE. + + + + A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for + header compression. PUSH_PROMISE also reserves a stream for later use, causing the + promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a + stream unless that stream is either "open" or "half closed (remote)"; the sender MUST + ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST + be in the "idle" state). + + + Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream + state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a + stream that is neither "open" nor "half closed (local)" as a connection error of type + PROTOCOL_ERROR. However, an endpoint that has sent + RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that + might have been created before the RST_STREAM frame is received and + processed. + + + A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a + stream that is not currently in the "idle" state) as a connection error of type + PROTOCOL_ERROR. + + + + The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical + to those defined for DATA frames. + +
    + +
    + + The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the + sender, as well as determining whether an idle connection is still functional. PING + frames can be sent from any endpoint. + +
    + +
    + + + In addition to the frame header, PING frames MUST contain 8 octets of data in the payload. + A sender can include any value it chooses and use those bytes in any fashion. + + + Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with + the ACK flag set in response, with an identical payload. PING responses SHOULD be given + higher priority than any other frame. + + + + The PING frame defines the following flags: + + + Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST + set this flag in PING responses. An endpoint MUST NOT respond to PING frames + containing this flag. + + + + + PING frames are not associated with any individual stream. If a PING frame is received + with a stream identifier field value other than 0x0, the recipient MUST respond with a + connection error of type + PROTOCOL_ERROR. + + + Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type + FRAME_SIZE_ERROR. + + +
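+
+ A non-normative Go sketch of both sides of the PING exchange using
+ golang.org/x/net/http2; fr is assumed and the opaque payload is arbitrary.
+
+   import "golang.org/x/net/http2"
+
+   // sendPing issues a PING whose payload can later be matched against the ack.
+   func sendPing(fr *http2.Framer) error {
+       return fr.WritePing(false, [8]byte{'h', '2', 'p', 'i', 'n', 'g', 0, 0})
+   }
+
+   // handlePing answers a peer's PING; acks are never answered.
+   func handlePing(fr *http2.Framer, f *http2.PingFrame) error {
+       if f.IsAck() {
+           return nil // completes our own round-trip measurement
+       }
+       return fr.WritePing(true, f.Data) // identical payload, ACK flag set
+   }
+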
    + +
+
+
+ The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this
+ connection. GOAWAY can be sent by either the client or the server. Once sent, the sender
+ will ignore frames sent on any new streams with identifiers higher than the included last
+ stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the
+ connection, although a new connection can be established for new streams.
+
+
+ The purpose of this frame is to allow an endpoint to gracefully stop accepting new
+ streams, while still finishing processing of previously established streams. This enables
+ administrative actions, like server maintenance.
+
+
+ There is an inherent race condition between an endpoint starting new streams and the
+ remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream
+ identifier of the last peer-initiated stream which was or might be processed on the
+ sending endpoint in this connection. For instance, if the server sends a GOAWAY frame,
+ the identified stream is the highest numbered stream initiated by the client.
+
+
+ If the receiver of the GOAWAY has sent data on streams with a higher stream identifier
+ than what is indicated in the GOAWAY frame, those streams are not or will not be
+ processed. The receiver of the GOAWAY frame can treat the streams as though they had
+ never been created at all, thereby allowing those streams to be retried later on a new
+ connection.
+
+
+ Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote
+ can know whether a stream has been partially processed or not. For example, if an HTTP
+ client sends a POST at the same time that a server closes a connection, the client cannot
+ know if the server started to process that POST request if the server does not send a
+ GOAWAY frame to indicate what streams it might have acted on.
+
+
+ An endpoint might choose to close a connection without sending GOAWAY for misbehaving
+ peers.
+
+
    + +
    + + The GOAWAY frame does not define any flags. + + + The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat + a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type + PROTOCOL_ERROR. + + + The last stream identifier in the GOAWAY frame contains the highest numbered stream + identifier for which the sender of the GOAWAY frame might have taken some action on, or + might yet take action on. All streams up to and including the identified stream might + have been processed in some way. The last stream identifier can be set to 0 if no streams + were processed. + + + In this context, "processed" means that some data from the stream was passed to some + higher layer of software that might have taken some action as a result. + + + If a connection terminates without a GOAWAY frame, the last stream identifier is + effectively the highest possible stream identifier. + + + On streams with lower or equal numbered identifiers that were not closed completely prior + to the connection being closed, re-attempting requests, transactions, or any protocol + activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or + DELETE. Any protocol activity that uses higher numbered streams can be safely retried + using a new connection. + + + Activity on streams numbered lower or equal to the last stream identifier might still + complete successfully. The sender of a GOAWAY frame might gracefully shut down a + connection by sending a GOAWAY frame, maintaining the connection in an open state until + all in-progress streams complete. + + + An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an + endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could + subsequently encounter an condition that requires immediate termination of the connection. + The last stream identifier from the last GOAWAY frame received indicates which streams + could have been acted upon. Endpoints MUST NOT increase the value they send in the last + stream identifier, since the peers might already have retried unprocessed requests on + another connection. + + + A client that is unable to retry requests loses all requests that are in flight when the + server closes the connection. This is especially true for intermediaries that might + not be serving clients using HTTP/2. A server that is attempting to gracefully shut down + a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to + 231-1 and a NO_ERROR code. This signals to the client that + a shutdown is imminent and that no further requests can be initiated. After waiting at + least one round trip time, the server can send another GOAWAY frame with an updated last + stream identifier. This ensures that a connection can be cleanly shut down without losing + requests. + + + + After sending a GOAWAY frame, the sender can discard frames for streams with identifiers + higher than the identified last stream. However, any frames that alter connection state + cannot be completely ignored. For instance, HEADERS, + PUSH_PROMISE and CONTINUATION frames MUST be minimally + processed to ensure the state maintained for header compression is consistent (see ); similarly DATA frames MUST be counted toward the connection flow + control window. Failure to process these frames can cause flow control or header + compression state to become unsynchronized. 
+ + + + The GOAWAY frame also contains a 32-bit error code that + contains the reason for closing the connection. + + + Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug + data is intended for diagnostic purposes only and carries no semantic value. Debug + information could contain security- or privacy-sensitive data. Logged or otherwise + persistently stored debug data MUST have adequate safeguards to prevent unauthorized + access. + +
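+
+ A non-normative Go sketch of the graceful shutdown sequence described above, using
+ golang.org/x/net/http2; fr, roundTrip and lastProcessed are assumed to be supplied by
+ the caller (for example, roundTrip might be measured with a PING exchange).
+
+   import (
+       "time"
+
+       "golang.org/x/net/http2"
+   )
+
+   func gracefulShutdown(fr *http2.Framer, roundTrip time.Duration, lastProcessed func() uint32) error {
+       // First GOAWAY: last stream identifier 2^31-1 signals that shutdown is
+       // imminent without yet committing to which streams were processed.
+       if err := fr.WriteGoAway(1<<31-1, http2.ErrCodeNo, nil); err != nil {
+           return err
+       }
+       time.Sleep(roundTrip) // wait at least one round trip time
+       // Second GOAWAY: the real, final last stream identifier.
+       return fr.WriteGoAway(lastProcessed(), http2.ErrCodeNo, nil)
+   }
+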
    + +
    + + The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see for an overview. + + + Flow control operates at two levels: on each individual stream and on the entire + connection. + + + Both types of flow control are hop-by-hop; that is, only between the two endpoints. + Intermediaries do not forward WINDOW_UPDATE frames between dependent connections. + However, throttling of data transfer by any receiver can indirectly cause the propagation + of flow control information toward the original sender. + + + Flow control only applies to frames that are identified as being subject to flow control. + Of the frame types defined in this document, this includes only DATA frames. + Frames that are exempt from flow control MUST be accepted and processed, unless the + receiver is unable to assign resources to handling the frame. A receiver MAY respond with + a stream error or connection error of type + FLOW_CONTROL_ERROR if it is unable to accept a frame. + +
    + +
+
+
+ The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer
+ indicating the number of octets that the sender can transmit in addition to the existing
+ flow control window. The legal range for the increment to the flow control window is 1 to
+ 2^31-1 (0x7fffffff) octets.
+
+
+ The WINDOW_UPDATE frame does not define any flags.
+
+
+ The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the
+ former case, the frame's stream identifier indicates the affected stream; in the latter,
+ the value "0" indicates that the entire connection is the subject of the frame.
+
+
+ A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window
+ increment of 0 as a stream error of type
+ PROTOCOL_ERROR; errors on the connection flow control window MUST be
+ treated as a connection error.
+
+
+ WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
+ This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)"
+ or "closed" stream. A receiver MUST NOT treat this as an error, see .
+
+
+ A receiver that receives a flow controlled frame MUST always account for its contribution
+ against the connection flow control window, unless the receiver treats this as a connection error. This is necessary even if the
+ frame is in error. Since the sender counts the frame toward the flow control window, if
+ the receiver does not, the flow control window at sender and receiver can become
+ different.
+
+
+
+ Flow control in HTTP/2 is implemented using a window kept by each sender on every
+ stream. The flow control window is a simple integer value that indicates how many octets
+ of data the sender is permitted to transmit; as such, its size is a measure of the
+ buffering capacity of the receiver.
+
+
+ Two flow control windows are applicable: the stream flow control window and the
+ connection flow control window. The sender MUST NOT send a flow controlled frame with a
+ length that exceeds the space available in either of the flow control windows advertised
+ by the receiver. Frames with zero length with the END_STREAM flag set (that is, an
+ empty DATA frame) MAY be sent if there is no available space in either
+ flow control window.
+
+
+ For flow control calculations, the 9 octet frame header is not counted.
+
+
+ After sending a flow controlled frame, the sender reduces the space available in both
+ windows by the length of the transmitted frame.
+
+
+ The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up
+ space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream
+ and connection level flow control windows.
+
+
+ A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the
+ amount specified in the frame.
+
+
+ A sender MUST NOT allow a flow control window to exceed 2^31-1 octets.
+ If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this
+ maximum it MUST terminate either the stream or the connection, as appropriate. For
+ streams, the sender sends a RST_STREAM with an error code of
+ FLOW_CONTROL_ERROR; for the connection, a GOAWAY
+ frame with a FLOW_CONTROL_ERROR code.
+
+
+ Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are
+ completely asynchronous with respect to each other. This property allows a receiver to
+ aggressively update the window size kept by the sender to prevent streams from stalling.
+
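+
+ A non-normative Go sketch of the sender-side bookkeeping this section describes;
+ flowWindow is a hypothetical type, and in practice one instance would exist per stream
+ plus one for the connection as a whole.
+
+   import "errors"
+
+   // flowWindow tracks how many more octets of flow controlled frames may be
+   // sent; the 9 octet frame header is not counted.
+   type flowWindow struct {
+       avail int64 // starts at 65,535 unless SETTINGS says otherwise
+   }
+
+   // take reserves space before sending a DATA frame; if it returns false the
+   // sender must wait for a WINDOW_UPDATE.
+   func (w *flowWindow) take(n int) bool {
+       if int64(n) > w.avail {
+           return false
+       }
+       w.avail -= int64(n)
+       return true
+   }
+
+   // add applies a WINDOW_UPDATE increment, enforcing the 2^31-1 limit.
+   func (w *flowWindow) add(increment uint32) error {
+       if w.avail+int64(increment) > 1<<31-1 {
+           return errors.New("flow control window would exceed 2^31-1")
+       }
+       w.avail += int64(increment)
+       return nil
+   }
+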
    + +
    + + When an HTTP/2 connection is first established, new streams are created with an initial + flow control window size of 65,535 octets. The connection flow control window is 65,535 + octets. Both endpoints can adjust the initial window size for new streams by including + a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS + frame that forms part of the connection preface. The connection flow control window can + only be changed using WINDOW_UPDATE frames. + + + Prior to receiving a SETTINGS frame that sets a value for + SETTINGS_INITIAL_WINDOW_SIZE, an endpoint can only use the default + initial window size when sending flow controlled frames. Similarly, the connection flow + control window is set to the default initial window size until a WINDOW_UPDATE frame is + received. + + + A SETTINGS frame can alter the initial flow control window size for all + current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, + a receiver MUST adjust the size of all stream flow control windows that it maintains by + the difference between the new value and the old value. + + + A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in + a flow control window to become negative. A sender MUST track the negative flow control + window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE + frames that cause the flow control window to become positive. + + + For example, if the client sends 60KB immediately on connection establishment, and the + server sets the initial window size to be 16KB, the client will recalculate the + available flow control window to be -44KB on receipt of the SETTINGS + frame. The client retains a negative flow control window until WINDOW_UPDATE frames + restore the window to being positive, after which the client can resume sending. + + + A SETTINGS frame cannot alter the connection flow control window. + + + An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that + causes any flow control window to exceed the maximum size as a connection error of type + FLOW_CONTROL_ERROR. + +
    + +
    + + A receiver that wishes to use a smaller flow control window than the current size can + send a new SETTINGS frame. However, the receiver MUST be prepared to + receive data that exceeds this window size, since the sender might send data that + exceeds the lower limit prior to processing the SETTINGS frame. + + + After sending a SETTINGS frame that reduces the initial flow control window size, a + receiver has two options for handling streams that exceed flow control limits: + + + The receiver can immediately send RST_STREAM with + FLOW_CONTROL_ERROR error code for the affected streams. + + + The receiver can accept the streams and tolerate the resulting head of line + blocking, sending WINDOW_UPDATE frames as it consumes data. + + + +
    +
    + +
    + + The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments. Any number of CONTINUATION frames can + be sent on an existing stream, as long as the preceding frame is on the same stream and is + a HEADERS, PUSH_PROMISE or CONTINUATION frame without the + END_HEADERS flag set. + + +
    + +
    + + The CONTINUATION frame payload contains a header block + fragment. + + + + The CONTINUATION frame defines the following flag: + + + + Bit 3 being set indicates that this frame ends a header + block. + + + If the END_HEADERS bit is not set, this frame MUST be followed by another + CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or + a frame on a different stream as a connection + error of type PROTOCOL_ERROR. + + + + + + + The CONTINUATION frame changes the connection state as defined in . + + + + CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received + whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. + + + + A CONTINUATION frame MUST be preceded by a HEADERS, + PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A + recipient that observes violation of this rule MUST respond with a connection error of type + PROTOCOL_ERROR. + +
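+
+ A non-normative Go sketch of splitting one header block across a HEADERS frame and
+ CONTINUATION frames with golang.org/x/net/http2; fr, streamID and block are assumed,
+ and maxFrameSize would come from the peer's SETTINGS_MAX_FRAME_SIZE.
+
+   import "golang.org/x/net/http2"
+
+   func writeHeaderBlock(fr *http2.Framer, streamID uint32, block []byte, maxFrameSize int) error {
+       first := true
+       for len(block) > 0 {
+           frag := block
+           if len(frag) > maxFrameSize {
+               frag = frag[:maxFrameSize]
+           }
+           block = block[len(frag):]
+           endHeaders := len(block) == 0
+           var err error
+           if first {
+               err = fr.WriteHeaders(http2.HeadersFrameParam{
+                   StreamID:      streamID,
+                   BlockFragment: frag,
+                   EndHeaders:    endHeaders,
+               })
+               first = false
+           } else {
+               // No other frame may be interleaved on this connection until
+               // the fragment carrying END_HEADERS has been sent.
+               err = fr.WriteContinuation(streamID, endHeaders, frag)
+           }
+           if err != nil {
+               return err
+           }
+       }
+       return nil
+   }
+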
    +
    + +
+
+
+ Error codes are 32-bit fields that are used in RST_STREAM and
+ GOAWAY frames to convey the reasons for the stream or connection error.
+
+
+
+ Error codes share a common code space. Some error codes apply only to either streams or the
+ entire connection and have no defined semantics in the other context.
+
+
+
+ The following error codes are defined:
+
+
+ NO_ERROR (0x0):
+ The associated condition is not a result of an error. For example, a
+ GOAWAY might include this code to indicate graceful shutdown of a
+ connection.
+
+
+ PROTOCOL_ERROR (0x1):
+ The endpoint detected an unspecific protocol error. This error is for use when a more
+ specific error code is not available.
+
+
+ INTERNAL_ERROR (0x2):
+ The endpoint encountered an unexpected internal error.
+
+
+ FLOW_CONTROL_ERROR (0x3):
+ The endpoint detected that its peer violated the flow control protocol.
+
+
+ SETTINGS_TIMEOUT (0x4):
+ The endpoint sent a SETTINGS frame, but did not receive a response in a
+ timely manner. See Settings Synchronization.
+
+
+ STREAM_CLOSED (0x5):
+ The endpoint received a frame after a stream was half closed.
+
+
+ FRAME_SIZE_ERROR (0x6):
+ The endpoint received a frame with an invalid size.
+
+
+ REFUSED_STREAM (0x7):
+ The endpoint refuses the stream prior to performing any application processing, see
+ for details.
+
+
+ CANCEL (0x8):
+ Used by the endpoint to indicate that the stream is no longer needed.
+
+
+ COMPRESSION_ERROR (0x9):
+ The endpoint is unable to maintain the header compression context for the connection.
+
+
+ CONNECT_ERROR (0xa):
+ The connection established in response to a CONNECT
+ request was reset or abnormally closed.
+
+
+ ENHANCE_YOUR_CALM (0xb):
+ The endpoint detected that its peer is exhibiting a behavior that might be generating
+ excessive load.
+
+
+ INADEQUATE_SECURITY (0xc):
+ The underlying transport has properties that do not meet minimum security
+ requirements (see ).
+
+
+
+
+ Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be
+ treated by an implementation as being equivalent to INTERNAL_ERROR.
+
    + +
    + + HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means + that, from the application perspective, the features of the protocol are largely + unchanged. To achieve this, all request and response semantics are preserved, although the + syntax of conveying those semantics has changed. + + + Thus, the specification and requirements of HTTP/1.1 Semantics and Content , Conditional Requests , Range Requests , Caching and Authentication are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax + and Routing , such as the HTTP and HTTPS URI schemes, are also + applicable in HTTP/2, but the expression of those semantics for this protocol are defined + in the sections below. + + +
    + + A client sends an HTTP request on a new stream, using a previously unused stream identifier. A server sends an HTTP response on + the same stream as the request. + + + An HTTP message (request or response) consists of: + + + for a response only, zero or more HEADERS frames (each followed by zero + or more CONTINUATION frames) containing the message headers of + informational (1xx) HTTP responses (see and ), + and + + + one HEADERS frame (followed by zero or more CONTINUATION + frames) containing the message headers (see ), and + + + zero or more DATA frames containing the message payload (see ), and + + + optionally, one HEADERS frame, followed by zero or more + CONTINUATION frames containing the trailer-part, if present (see ). + + + The last frame in the sequence bears an END_STREAM flag, noting that a + HEADERS frame bearing the END_STREAM flag can be followed by + CONTINUATION frames that carry any remaining portions of the header block. + + + Other frames (from any stream) MUST NOT occur between either HEADERS frame + and any CONTINUATION frames that might follow. + + + + Trailing header fields are carried in a header block that also terminates the stream. + That is, a sequence starting with a HEADERS frame, followed by zero or more + CONTINUATION frames, where the HEADERS frame bears an + END_STREAM flag. Header blocks after the first that do not terminate the stream are not + part of an HTTP request or response. + + + A HEADERS frame (and associated CONTINUATION frames) can + only appear at the start or end of a stream. An endpoint that receives a + HEADERS frame without the END_STREAM flag set after receiving a final + (non-informational) status code MUST treat the corresponding request or response as malformed. + + + + An HTTP request/response exchange fully consumes a single stream. A request starts with + the HEADERS frame that puts the stream into an "open" state. The request + ends with a frame bearing END_STREAM, which causes the stream to become "half closed + (local)" for the client and "half closed (remote)" for the server. A response starts with + a HEADERS frame and ends with a frame bearing END_STREAM, which places the + stream in the "closed" state. + + + +
    + + HTTP/2 removes support for the 101 (Switching Protocols) informational status code + (). + + + The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol. + Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate + their use (see ). + +
    + +
    + + HTTP header fields carry information as a series of key-value pairs. For a listing of + registered HTTP headers, see the Message Header Field Registry maintained at . + + +
    + + While HTTP/1.x used the message start-line (see ) to convey the target URI and method of the request, and the + status code for the response, HTTP/2 uses special pseudo-header fields beginning with + ':' character (ASCII 0x3a) for this purpose. + + + Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate + pseudo-header fields other than those defined in this document. + + + Pseudo-header fields are only valid in the context in which they are defined. + Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header + fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST + NOT appear in trailers. Endpoints MUST treat a request or response that contains + undefined or invalid pseudo-header fields as malformed. + + + Just as in HTTP/1.x, header field names are strings of ASCII characters that are + compared in a case-insensitive fashion. However, header field names MUST be converted + to lowercase prior to their encoding in HTTP/2. A request or response containing + uppercase header field names MUST be treated as malformed. + + + All pseudo-header fields MUST appear in the header block before regular header fields. + Any request or response that contains a pseudo-header field that appears in a header + block after a regular header field MUST be treated as malformed. + +
    + +
    + + HTTP/2 does not use the Connection header field to + indicate connection-specific header fields; in this protocol, connection-specific + metadata is conveyed by other means. An endpoint MUST NOT generate a HTTP/2 message + containing connection-specific header fields; any message containing + connection-specific header fields MUST be treated as malformed. + + + This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need + to remove any header fields nominated by the Connection header field, along with the + Connection header field itself. Such intermediaries SHOULD also remove other + connection-specific header fields, such as Keep-Alive, Proxy-Connection, + Transfer-Encoding and Upgrade, even if they are not nominated by Connection. + + + One exception to this is the TE header field, which MAY be present in an HTTP/2 + request, but when it is MUST NOT contain any value other than "trailers". + + + + + HTTP/2 purposefully does not support upgrade to another protocol. The handshake + methods described in are believed sufficient to + negotiate the use of alternative protocols. + + + +
    + +
    + + The following pseudo-header fields are defined for HTTP/2 requests: + + + + The :method pseudo-header field includes the HTTP + method (). + + + + + The :scheme pseudo-header field includes the scheme + portion of the target URI (). + + + :scheme is not restricted to http and https schemed URIs. A + proxy or gateway can translate requests for non-HTTP schemes, enabling the use + of HTTP to interact with non-HTTP services. + + + + + The :authority pseudo-header field includes the + authority portion of the target URI (). The authority MUST NOT include the deprecated userinfo subcomponent for http + or https schemed URIs. + + + To ensure that the HTTP/1.1 request line can be reproduced accurately, this + pseudo-header field MUST be omitted when translating from an HTTP/1.1 request + that has a request target in origin or asterisk form (see ). Clients that generate + HTTP/2 requests directly SHOULD use the :authority pseudo-header + field instead of the Host header field. An + intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by + copying the value of the :authority pseudo-header + field. + + + + + The :path pseudo-header field includes the path and + query parts of the target URI (the path-absolute + production from and optionally a '?' character + followed by the query production, see and ). A request in asterisk form includes the value '*' for the + :path pseudo-header field. + + + This pseudo-header field MUST NOT be empty for http + or https URIs; http or + https URIs that do not contain a path component + MUST include a value of '/'. The exception to this rule is an OPTIONS request + for an http or https + URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*' (see ). + + + + + + All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request. An HTTP request that omits mandatory + pseudo-header fields is malformed. + + + HTTP/2 does not define a way to carry the version identifier that is included in the + HTTP/1.1 request line. + +
    + +
    + + For HTTP/2 responses, a single :status pseudo-header + field is defined that carries the HTTP status code field (see ). This pseudo-header field MUST be included in all + responses, otherwise the response is malformed. + + + HTTP/2 does not define a way to carry the version or reason phrase that is included in + an HTTP/1.1 status line. + +
    + +
    + + The Cookie header field can carry a significant amount of + redundant data. + + + The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs"). + This header field doesn't follow the list construction rules in HTTP (see ), which prevents cookie-pairs from + being separated into different name-value pairs. This can significantly reduce + compression efficiency as individual cookie-pairs are updated. + + + To allow for better compression efficiency, the Cookie header field MAY be split into + separate header fields, each with one or more cookie-pairs. If there are multiple + Cookie header fields after decompression, these MUST be concatenated into a single + octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ") + before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a + generic HTTP server application. + +
+
+ Therefore, the following two lists of Cookie header fields are semantically
+ equivalent.
+
+   cookie: a=b; c=d; e=f
+
+   cookie: a=b
+   cookie: c=d
+   cookie: e=f
+
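+
+ A non-normative Go sketch of the required recombination when crossing into a
+ non-HTTP/2 context; the cookie values are taken from the example above.
+
+   import "strings"
+
+   // Re-join split cookie header fields with the two-octet delimiter "; "
+   // before forwarding to an HTTP/1.1 hop or a generic server application.
+   func combineCookies(values []string) string {
+       return strings.Join(values, "; ")
+   }
+
+   // combineCookies([]string{"a=b", "c=d", "e=f"}) == "a=b; c=d; e=f"
+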
    +
    + +
+
+
+ A malformed request or response is one that is an otherwise valid sequence of HTTP/2
+ frames, but is invalid due to the presence of extraneous frames, prohibited
+ header fields, the absence of mandatory header fields, or the inclusion of uppercase
+ header field names.
+
+
+ A request or response that includes an entity body can include a content-length header field. A request or response is also
+ malformed if the value of a content-length header field
+ does not equal the sum of the DATA frame payload lengths that form the
+ body. A response that is defined to have no payload, as described in , can have a non-zero
+ content-length header field, even though no content is
+ included in DATA frames.
+
+
+ Intermediaries that process HTTP requests or responses (i.e., any intermediary not
+ acting as a tunnel) MUST NOT forward a malformed request or response. Malformed
+ requests or responses that are detected MUST be treated as a stream error of type PROTOCOL_ERROR.
+
+
+ For malformed requests, a server MAY send an HTTP response prior to closing or
+ resetting the stream. Clients MUST NOT accept a malformed response. Note that these
+ requirements are intended to protect against several types of common attacks against
+ HTTP; they are deliberately strict, because being permissive can expose
+ implementations to these vulnerabilities.
+
    +
    + +
    + + This section shows HTTP/1.1 requests and responses, with illustrations of equivalent + HTTP/2 requests and responses. + + + An HTTP GET request includes request header fields and no body and is therefore + transmitted as a single HEADERS frame, followed by zero or more + CONTINUATION frames containing the serialized block of request header + fields. The HEADERS frame in the following has both the END_HEADERS and + END_STREAM flags set; no CONTINUATION frames are sent: + + +
    + + END_STREAM + Accept: image/jpeg + END_HEADERS + :method = GET + :scheme = https + :path = /resource + host = example.org + accept = image/jpeg +]]> +
    + + + Similarly, a response that includes only response header fields is transmitted as a + HEADERS frame (again, followed by zero or more + CONTINUATION frames) containing the serialized block of response header + fields. + + +
    + + END_STREAM + Expires: Thu, 23 Jan ... + END_HEADERS + :status = 304 + etag = "xyzzy" + expires = Thu, 23 Jan ... +]]> +
    + + + An HTTP POST request that includes request header fields and payload data is transmitted + as one HEADERS frame, followed by zero or more + CONTINUATION frames containing the request header fields, followed by one + or more DATA frames, with the last CONTINUATION (or + HEADERS) frame having the END_HEADERS flag set and the final + DATA frame having the END_STREAM flag set: + + +
    + - END_STREAM + Content-Type: image/jpeg - END_HEADERS + Content-Length: 123 :method = POST + :path = /resource + {binary data} :scheme = https + + CONTINUATION + + END_HEADERS + content-type = image/jpeg + host = example.org + content-length = 123 + + DATA + + END_STREAM + {binary data} +]]> + + Note that data contributing to any given header field could be spread between header + block fragments. The allocation of header fields to frames in this example is + illustrative only. + +
    + + + A response that includes header fields and payload data is transmitted as a + HEADERS frame, followed by zero or more CONTINUATION + frames, followed by one or more DATA frames, with the last + DATA frame in the sequence having the END_STREAM flag set: + + +
    + - END_STREAM + Content-Length: 123 + END_HEADERS + :status = 200 + {binary data} content-type = image/jpeg + content-length = 123 + + DATA + + END_STREAM + {binary data} +]]> +
    + + + Trailing header fields are sent as a header block after both the request or response + header block and all the DATA frames have been sent. The + HEADERS frame starting the trailers header block has the END_STREAM flag + set. + + +
    + - END_STREAM + Transfer-Encoding: chunked + END_HEADERS + Trailer: Foo :status = 200 + content-length = 123 + 123 content-type = image/jpeg + {binary data} trailer = Foo + 0 + Foo: bar DATA + - END_STREAM + {binary data} + + HEADERS + + END_STREAM + + END_HEADERS + foo = bar +]]> +
    + + +
    + + An informational response using a 1xx status code other than 101 is transmitted as a + HEADERS frame, followed by zero or more CONTINUATION + frames: + + - END_STREAM + + END_HEADERS + :status = 103 + extension-field = bar +]]> +
    +
    + +
    + + In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error + occurs, because there is no means to determine the nature of the error. It is possible + that some server processing occurred prior to the error, which could result in + undesirable effects if the request were reattempted. + + + HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has + not been processed: + + + The GOAWAY frame indicates the highest stream number that might have + been processed. Requests on streams with higher numbers are therefore guaranteed to + be safe to retry. + + + The REFUSED_STREAM error code can be included in a + RST_STREAM frame to indicate that the stream is being closed prior to + any processing having occurred. Any request that was sent on the reset stream can + be safely retried. + + + + + Requests that have not been processed have not failed; clients MAY automatically retry + them, even those with non-idempotent methods. + + + A server MUST NOT indicate that a stream has not been processed unless it can guarantee + that fact. If frames that are on a stream are passed to the application layer for any + stream, then REFUSED_STREAM MUST NOT be used for that stream, and a + GOAWAY frame MUST include a stream identifier that is greater than or + equal to the given stream identifier. + + + In addition to these mechanisms, the PING frame provides a way for a + client to easily test a connection. Connections that remain idle can become broken as + some middleboxes (for instance, network address translators, or load balancers) silently + discard connection bindings. The PING frame allows a client to safely + test whether a connection is still active without sending a request. + +
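+
+ A non-normative Go sketch of a client-side retry decision based on these two
+ guarantees; the function and its inputs are illustrative, with goawayLast being the
+ last stream identifier from the most recent GOAWAY and rstCode the error code from a
+ RST_STREAM, if one was received.
+
+   import "golang.org/x/net/http2"
+
+   // canRetry reports whether a request is known not to have been processed,
+   // and may therefore be retried automatically, even if non-idempotent.
+   func canRetry(streamID, goawayLast uint32, rstCode *http2.ErrCode) bool {
+       if streamID > goawayLast {
+           return true // above the GOAWAY last stream identifier
+       }
+       return rstCode != nil && *rstCode == http2.ErrCodeRefusedStream
+   }
+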
    +
    + +
    + + HTTP/2 allows a server to pre-emptively send (or "push") responses (along with + corresponding "promised" requests) to a client in association with a previous + client-initiated request. This can be useful when the server knows the client will need + to have those responses available in order to fully process the response to the original + request. + + + + Pushing additional message exchanges in this fashion is optional, and is negotiated + between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set + to 0 to indicate that server push is disabled. + + + Promised requests MUST be cacheable (see ), MUST be safe (see ) and MUST NOT include a request body. Clients that receive a + promised request that is not cacheable, unsafe or that includes a request body MUST + reset the stream with a stream error of type + PROTOCOL_ERROR. + + + Pushed responses that are cacheable (see ) can be stored by the client, if it implements a HTTP + cache. Pushed responses are considered successfully validated on the origin server (e.g., + if the "no-cache" cache response directive is present) while the stream identified by the + promised stream ID is still open. + + + Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY + be made available to the application separately. + + + An intermediary can receive pushes from the server and choose not to forward them on to + the client. In other words, how to make use of the pushed information is up to that + intermediary. Equally, the intermediary might choose to make additional pushes to the + client, without any action taken by the server. + + + A client cannot push. Thus, servers MUST treat the receipt of a + PUSH_PROMISE frame as a connection + error of type PROTOCOL_ERROR. Clients MUST reject any attempt to + change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating + the message as a connection error of type + PROTOCOL_ERROR. + + +
    + + Server push is semantically equivalent to a server responding to a request; however, in + this case that request is also sent by the server, as a PUSH_PROMISE + frame. + + + The PUSH_PROMISE frame includes a header block that contains a complete + set of request header fields that the server attributes to the request. It is not + possible to push a response to a request that includes a request body. + + + + Pushed responses are always associated with an explicit request from the client. The + PUSH_PROMISE frames sent by the server are sent on that explicit + request's stream. The PUSH_PROMISE frame also includes a promised stream + identifier, chosen from the stream identifiers available to the server (see ). + + + + The header fields in PUSH_PROMISE and any subsequent + CONTINUATION frames MUST be a valid and complete set of request header fields. The server MUST include a method in + the :method header field that is safe and cacheable. If a + client receives a PUSH_PROMISE that does not include a complete and valid + set of header fields, or the :method header field identifies + a method that is not safe, it MUST respond with a stream error of type PROTOCOL_ERROR. + + + + The server SHOULD send PUSH_PROMISE () + frames prior to sending any frames that reference the promised responses. This avoids a + race where clients issue requests prior to receiving any PUSH_PROMISE + frames. + + + For example, if the server receives a request for a document containing embedded links + to multiple image files, and the server chooses to push those additional images to the + client, sending push promises before the DATA frames that contain the + image links ensures that the client is able to see the promises before discovering + embedded links. Similarly, if the server pushes responses referenced by the header block + (for instance, in Link header fields), sending the push promises before sending the + header block ensures that clients do not request them. + + + + PUSH_PROMISE frames MUST NOT be sent by the client. + + + PUSH_PROMISE frames can be sent by the server in response to any + client-initiated stream, but the stream MUST be in either the "open" or "half closed + (remote)" state with respect to the server. PUSH_PROMISE frames are + interspersed with the frames that comprise a response, though they cannot be + interspersed with HEADERS and CONTINUATION frames that + comprise a single header block. + + + Sending a PUSH_PROMISE frame creates a new stream and puts the stream + into the “reserved (local)” state for the server and the “reserved (remote)” state for + the client. + +
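    For orientation, Go's standard net/http package (1.8 and later) exposes server push to
    handlers through the http.Pusher interface, which emits a PUSH_PROMISE on HTTP/2
    connections. A sketch, with hypothetical paths and certificate files, that promises a
    stylesheet before writing the response that references it, in line with the ordering
    advice above:

    package main

    import (
        "io"
        "log"
        "net/http"
    )

    func handler(w http.ResponseWriter, r *http.Request) {
        // Only HTTP/2 responses implement http.Pusher; the promised request is a
        // safe, cacheable GET with no request body, as required above.
        if pusher, ok := w.(http.Pusher); ok {
            if err := pusher.Push("/style.css", nil); err != nil {
                log.Printf("push failed: %v", err)
            }
        }
        io.WriteString(w, `<html><head><link rel="stylesheet" href="/style.css"></head></html>`)
    }

    func main() {
        http.HandleFunc("/", handler)
        // HTTP/2 is negotiated automatically for TLS servers in recent Go releases.
        log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
    }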
    + +
    + + After sending the PUSH_PROMISE frame, the server can begin delivering the + pushed response as a response on a server-initiated + stream that uses the promised stream identifier. The server uses this stream to + transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed" + to the client after the initial HEADERS frame is sent. + + + + Once a client receives a PUSH_PROMISE frame and chooses to accept the + pushed response, the client SHOULD NOT issue any requests for the promised response + until after the promised stream has closed. + + + + If the client determines, for any reason, that it does not wish to receive the pushed + response from the server, or if the server takes too long to begin sending the promised + response, the client can send an RST_STREAM frame, using either the + CANCEL or REFUSED_STREAM codes, and referencing the pushed + stream's identifier. + + + A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the + number of responses that can be concurrently pushed by a server. Advertising a + SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by + preventing the server from creating the necessary streams. This does not prohibit a + server from sending PUSH_PROMISE frames; clients need to reset any + promised streams that are not wanted. + + + + Clients receiving a pushed response MUST validate that either the server is + authoritative (see ), or the proxy that provided the pushed + response is configured for the corresponding request. For example, a server that offers + a certificate for only the example.com DNS-ID or Common Name + is not permitted to push a response for https://www.example.org/doc. + + + The response for a PUSH_PROMISE stream begins with a + HEADERS frame, which immediately puts the stream into the “half closed + (remote)” state for the server and “half closed (local)” state for the client, and ends + with a frame bearing END_STREAM, which places the stream in the "closed" state. + + + The client never sends a frame with the END_STREAM flag for a server push. + + + +
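    A client that declines a promised response can reset the promised stream as described
    above. A sketch using the vendored Framer API (the function and its arguments are
    assumed to come from surrounding connection-handling code); note that the Transport
    added in this change instead disables push entirely by advertising
    SETTINGS_ENABLE_PUSH = 0:

    package push

    import "golang.org/x/net/http2"

    // rejectPush cancels a pushed stream that the client does not want. fr is the
    // Framer for the connection and pp the PUSH_PROMISE frame that was just read.
    func rejectPush(fr *http2.Framer, pp *http2.PushPromiseFrame) error {
        return fr.WriteRSTStream(pp.PromiseID, http2.ErrCodeCancel)
    }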
    + +
    + +
    + + In HTTP/1.x, the pseudo-method CONNECT () is used to convert an HTTP connection into a tunnel to a remote host. + CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin + server for the purposes of interacting with https resources. + + + In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to + a remote host, for similar purposes. The HTTP header field mapping works as defined in + Request Header Fields, with a few + differences. Specifically: + + + The :method header field is set to CONNECT. + + + The :scheme and :path header + fields MUST be omitted. + + + The :authority header field contains the host and port to + connect to (equivalent to the authority-form of the request-target of CONNECT + requests, see ). + + + + + A proxy that supports CONNECT establishes a TCP connection to + the server identified in the :authority header field. Once + this connection is successfully established, the proxy sends a HEADERS + frame containing a 2xx series status code to the client, as defined in . + + + After the initial HEADERS frame sent by each peer, all subsequent + DATA frames correspond to data sent on the TCP connection. The payload of + any DATA frames sent by the client is transmitted by the proxy to the TCP + server; data received from the TCP server is assembled into DATA frames by + the proxy. Frame types other than DATA or stream management frames + (RST_STREAM, WINDOW_UPDATE, and PRIORITY) + MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received. + + + The TCP connection can be closed by either peer. The END_STREAM flag on a + DATA frame is treated as being equivalent to the TCP FIN bit. A client is + expected to send a DATA frame with the END_STREAM flag set after receiving + a frame bearing the END_STREAM flag. A proxy that receives a DATA frame + with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP + segment. A proxy that receives a TCP segment with the FIN bit set sends a + DATA frame with the END_STREAM flag set. Note that the final TCP segment + or DATA frame could be empty. + + + A TCP connection error is signaled with RST_STREAM. A proxy treats any + error in the TCP connection, which includes receiving a TCP segment with the RST bit set, + as a stream error of type + CONNECT_ERROR. Correspondingly, a proxy MUST send a TCP segment with the + RST bit set if it detects an error with the stream or the HTTP/2 connection. + +
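    A sketch of the header block for such a CONNECT request, using the vendored hpack and
    Framer APIs; the stream ID and authority are placeholders, and END_STREAM is left unset
    so that subsequent DATA frames can carry the tunnelled bytes in both directions:

    package connect

    import (
        "bytes"

        "golang.org/x/net/http2"
        "golang.org/x/net/http2/hpack"
    )

    // writeConnect emits the HEADERS frame for a CONNECT request: only :method
    // and :authority are sent; :scheme and :path are omitted.
    func writeConnect(fr *http2.Framer, streamID uint32, authority string) error {
        var block bytes.Buffer
        enc := hpack.NewEncoder(&block)
        if err := enc.WriteField(hpack.HeaderField{Name: ":method", Value: "CONNECT"}); err != nil {
            return err
        }
        if err := enc.WriteField(hpack.HeaderField{Name: ":authority", Value: authority}); err != nil {
            return err
        }
        return fr.WriteHeaders(http2.HeadersFrameParam{
            StreamID:      streamID,
            BlockFragment: block.Bytes(),
            EndHeaders:    true,
        })
    }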
    +
    + +
    + + This section outlines attributes of the HTTP protocol that improve interoperability, reduce + exposure to known security vulnerabilities, or reduce the potential for implementation + variation. + + +
    + + HTTP/2 connections are persistent. For best performance, it is expected clients will not + close connections until it is determined that no further communication with a server is + necessary (for example, when a user navigates away from a particular web page), or until + the server closes the connection. + + + Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair, + where host is derived from a URI, a selected alternative + service, or a configured proxy. + + + A client can create additional connections as replacements, either to replace connections + that are near to exhausting the available stream + identifier space, to refresh the keying material for a TLS connection, or to + replace connections that have encountered errors. + + + A client MAY open multiple connections to the same IP address and TCP port using different + Server Name Indication values or to provide different TLS + client certificates, but SHOULD avoid creating multiple connections with the same + configuration. + + + Servers are encouraged to maintain open connections for as long as possible, but are + permitted to terminate idle connections if necessary. When either endpoint chooses to + close the transport-layer TCP connection, the terminating endpoint SHOULD first send a + GOAWAY () frame so that both endpoints can reliably + determine whether previously sent frames have been processed and gracefully complete or + terminate any necessary remaining tasks. + + +
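    As a point of reference, the vendored http2.Transport later in this diff follows this
    guidance by caching one ClientConn per authority, so repeated requests to the same host
    and port share a single connection. A sketch of that usage (the URL is illustrative):

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        // One Transport, and therefore one cached connection per authority:
        // both requests below travel over the same HTTP/2 connection.
        client := &http.Client{Transport: &http2.Transport{}}
        for _, path := range []string{"/a", "/b"} {
            resp, err := client.Get("https://example.org" + path)
            if err != nil {
                log.Fatal(err)
            }
            resp.Body.Close()
        }
    }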
    Connections that are made to an origin server, either directly or through a tunnel
    created using the CONNECT method, MAY be reused for requests with multiple different
    URI authority components. A connection can be reused as long as the origin server is
    authoritative. For http resources, this depends on the host having resolved to the
    same IP address.

    For https resources, connection reuse additionally depends on having a certificate
    that is valid for the host in the URI. An origin server might offer a certificate with
    multiple subjectAltName attributes, or names with wildcards, one of which is valid for
    the authority in the URI. For example, a certificate with a subjectAltName of
    *.example.com might permit the use of the same connection for requests to URIs
    starting with https://a.example.com/ and https://b.example.com/.

    In some deployments, reusing a connection for multiple origins can result in requests
    being directed to the wrong origin server. For example, TLS termination might be
    performed by a middlebox that uses the TLS Server Name Indication (SNI) extension to
    select an origin server. This means that it is possible for clients to send
    confidential information to servers that might not be the intended target for the
    request, even though the server is otherwise authoritative.

    A server that does not wish clients to reuse connections can indicate that it is not
    authoritative for a request by sending a 421 (Misdirected Request) status code in
    response to the request (see ).

    A client that is configured to use a proxy over HTTP/2 directs requests to that proxy
    through a single connection. That is, all requests sent via a proxy reuse the
    connection to the proxy.
    + +
    + + The 421 (Misdirected Request) status code indicates that the request was directed at a + server that is not able to produce a response. This can be sent by a server that is not + configured to produce responses for the combination of scheme and authority that are + included in the request URI. + + + Clients receiving a 421 (Misdirected Request) response from a server MAY retry the + request - whether the request method is idempotent or not - over a different connection. + This is possible if a connection is reused () or if an alternative + service is selected (). + + + This status code MUST NOT be generated by proxies. + + + A 421 response is cacheable by default; i.e., unless otherwise indicated by the method + definition or explicit cache controls (see ). + +
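    A sketch of a server-side guard that returns 421 when it is not willing to be
    authoritative for the request's authority; the servedHosts map is a stand-in for
    whatever authority or certificate check a deployment actually performs (Go's net/http
    exposes the code as http.StatusMisdirectedRequest):

    package main

    import "net/http"

    // misdirectedGuard rejects requests whose authority this server is not
    // configured to answer for.
    func misdirectedGuard(next http.Handler, servedHosts map[string]bool) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            if !servedHosts[r.Host] {
                http.Error(w, "misdirected request", http.StatusMisdirectedRequest) // 421
                return
            }
            next.ServeHTTP(w, r)
        })
    }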
    +
    + +
    + + Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over + TLS. The general TLS usage guidance in SHOULD be followed, with + some additional restrictions that are specific to HTTP/2. + + + + An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on + feature set and cipher suite described in this section. Due to implementation + limitations, it might not be possible to fail TLS negotiation. An endpoint MUST + immediately terminate an HTTP/2 connection that does not meet these minimum requirements + with a connection error of type + INADEQUATE_SECURITY. + + +
    + + The TLS implementation MUST support the Server Name Indication + (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when + negotiating TLS. + + + The TLS implementation MUST disable compression. TLS compression can lead to the + exposure of information that would not otherwise be revealed . + Generic compression is unnecessary since HTTP/2 provides compression features that are + more aware of context and therefore likely to be more appropriate for use for + performance, security or other reasons. + + + The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS + renegotiation as a connection error of type + PROTOCOL_ERROR. Note that disabling renegotiation can result in + long-lived connections becoming unusable due to limits on the number of messages the + underlying cipher suite can encipher. + + + A client MAY use renegotiation to provide confidentiality protection for client + credentials offered in the handshake, but any renegotiation MUST occur prior to sending + the connection preface. A server SHOULD request a client certificate if it sees a + renegotiation request immediately after establishing a connection. + + + This effectively prevents the use of renegotiation in response to a request for a + specific protected resource. A future specification might provide a way to support this + use case. + +
    + +
    The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST
    only be used with cipher suites that have ephemeral key exchange, such as the ephemeral
    Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE). Ephemeral key exchange MUST
    have a minimum size of 2048 bits for DHE or a security level of 128 bits for ECDHE.
    Clients MUST accept DHE sizes of up to 4096 bits. HTTP MUST NOT be used with cipher
    suites that use stream or block ciphers. Authenticated Encryption with Additional Data
    (AEAD) modes, such as the Galois/Counter Mode (GCM) for AES, are acceptable.

    The effect of these restrictions is that TLS 1.2 implementations could have
    non-intersecting sets of available cipher suites, since these prevent the use of the
    cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of
    HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with P256.

    Clients MAY advertise support of cipher suites that are prohibited by the above
    restrictions in order to allow for connection to servers that do not support HTTP/2.
    This enables a fallback to protocols without these constraints without the additional
    latency imposed by using a separate connection for fallback.
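    A crypto/tls configuration consistent with these requirements might look as follows.
    This is a sketch, not a statement of what any particular deployment must use: it pins
    TLS 1.2 as a minimum, sets the SNI name, offers ECDHE+AEAD suites including the
    mandatory TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, and advertises "h2" via ALPN. Go's
    crypto/tls does not implement TLS-level compression, and client-side renegotiation
    defaults to off (tls.RenegotiateNever).

    package main

    import "crypto/tls"

    func newH2TLSConfig() *tls.Config {
        return &tls.Config{
            ServerName: "example.org", // SNI, as required above
            MinVersion: tls.VersionTLS12,
            CipherSuites: []uint16{
                tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // the mandatory suite
                tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
            },
            NextProtos: []string{"h2"}, // ALPN identifier for HTTP/2 over TLS
        }
    }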
    +
    +
    + +
    +
    + + HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is + authoritative in providing a given response, see . This relies on local name resolution for the "http" + URI scheme, and the authenticated server identity for the "https" scheme (see ). + +
    + +
    In a cross-protocol attack, an attacker causes a client to initiate a transaction in one
    protocol toward a server that understands a different protocol. An attacker might be
    able to cause the transaction to appear as a valid transaction in the second protocol.
    In combination with the capabilities of the web context, this can be used to interact
    with poorly protected servers in private networks.

    Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered
    sufficient protection against cross-protocol attacks. ALPN provides a positive
    indication that a server is willing to proceed with HTTP/2, which prevents attacks on
    other TLS-based protocols.

    The encryption in TLS makes it difficult for attackers to control the data which could
    be used in a cross-protocol attack on a cleartext protocol.

    The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks.
    The connection preface contains a string that is designed to confuse HTTP/1.1 servers,
    but no special protection is offered for other protocols. A server that is willing to
    ignore parts of an HTTP/1.1 request containing an Upgrade header field in addition to
    the client connection preface could be exposed to a cross-protocol attack.
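    The vendored Transport applies exactly this kind of check after its TLS handshake (see
    dialTLSDefault later in this diff); a condensed sketch of that verification:

    package main

    import (
        "crypto/tls"
        "fmt"
    )

    // checkALPN rejects a connection unless "h2" was mutually negotiated,
    // mirroring the check in the vendored Transport's dialTLSDefault.
    func checkALPN(cn *tls.Conn) error {
        state := cn.ConnectionState()
        if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol != "h2" {
            return fmt.Errorf("unexpected ALPN protocol %q; want %q", state.NegotiatedProtocol, "h2")
        }
        return nil
    }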
    + +
    + + HTTP/2 header field names and values are encoded as sequences of octets with a length + prefix. This enables HTTP/2 to carry any string of octets as the name or value of a + header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1 + directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might + exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal + header fields, extra header fields, or even new messages that are entirely falsified. + + + Header field names or values that contain characters not permitted by HTTP/1.1, including + carriage return (ASCII 0xd) or line feed (ASCII 0xa) MUST NOT be translated verbatim by an + intermediary, as stipulated in . + + + Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker. + Intermediaries that perform translation to HTTP/2 MUST remove any instances of the obs-fold production from header field values. + +
    + +
    + + Pushed responses do not have an explicit request from the client; the request + is provided by the server in the PUSH_PROMISE frame. + + + Caching responses that are pushed is possible based on the guidance provided by the origin + server in the Cache-Control header field. However, this can cause issues if a single + server hosts more than one tenant. For example, a server might offer multiple users each + a small portion of its URI space. + + + Where multiple tenants share space on the same server, that server MUST ensure that + tenants are not able to push representations of resources that they do not have authority + over. Failure to enforce this would allow a tenant to provide a representation that would + be served out of cache, overriding the actual representation that the authoritative tenant + provides. + + + Pushed responses for which an origin server is not authoritative (see + ) are never cached or used. + +
    + +
    + + An HTTP/2 connection can demand a greater commitment of resources to operate than a + HTTP/1.1 connection. The use of header compression and flow control depend on a + commitment of resources for storing a greater amount of state. Settings for these + features ensure that memory commitments for these features are strictly bounded. + + + The number of PUSH_PROMISE frames is not constrained in the same fashion. + A client that accepts server push SHOULD limit the number of streams it allows to be in + the "reserved (remote)" state. Excessive number of server push streams can be treated as + a stream error of type + ENHANCE_YOUR_CALM. + + + Processing capacity cannot be guarded as effectively as state capacity. + + + The SETTINGS frame can be abused to cause a peer to expend additional + processing time. This might be done by pointlessly changing SETTINGS parameters, setting + multiple undefined parameters, or changing the same setting multiple times in the same + frame. WINDOW_UPDATE or PRIORITY frames can be abused to + cause an unnecessary waste of resources. + + + Large numbers of small or empty frames can be abused to cause a peer to expend time + processing frame headers. Note however that some uses are entirely legitimate, such as + the sending of an empty DATA frame to end a stream. + + + Header compression also offers some opportunities to waste processing resources; see for more details on potential abuses. + + + Limits in SETTINGS parameters cannot be reduced instantaneously, which + leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In + particular, immediately after establishing a connection, limits set by a server are not + known to clients and could be exceeded without being an obvious protocol violation. + + + All these features - i.e., SETTINGS changes, small frames, header + compression - have legitimate uses. These features become a burden only when they are + used unnecessarily or to excess. + + + An endpoint that doesn't monitor this behavior exposes itself to a risk of denial of + service attack. Implementations SHOULD track the use of these features and set limits on + their use. An endpoint MAY treat activity that is suspicious as a connection error of type + ENHANCE_YOUR_CALM. + + +
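    Concretely, the vendored golang.org/x/net/http2 server exposes some of these limits as
    configuration; a sketch with illustrative values (field names as in that package, the
    address and certificate files are placeholders):

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
        // Bound the state a peer can force this server to hold; the values are
        // illustrative, not recommendations.
        if err := http2.ConfigureServer(srv, &http2.Server{
            MaxConcurrentStreams: 128,     // advertised as SETTINGS_MAX_CONCURRENT_STREAMS
            MaxReadFrameSize:     1 << 20, // advertised as SETTINGS_MAX_FRAME_SIZE
        }); err != nil {
            log.Fatal(err)
        }
        log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
    }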
    A large header block can cause an implementation to commit a large amount of state.
    Header fields that are critical for routing can appear toward the end of a header
    block, which prevents streaming of header fields to their ultimate destination. For
    this and other reasons, such as ensuring cache correctness, an endpoint might need to
    buffer the entire header block. Since there is no hard limit to the size of a header
    block, some endpoints could be forced to commit a large amount of available memory for
    header fields.

    An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE to advise peers of limits that
    might apply on the size of header blocks. This setting is only advisory, so endpoints
    MAY choose to send header blocks that exceed this limit and risk having the request or
    response treated as malformed. This setting is specific to a connection, so any request
    or response could encounter a hop with a lower, unknown limit. An intermediary can
    attempt to avoid this problem by passing on values presented by different peers, but
    they are not obligated to do so.

    A server that receives a larger header block than it is willing to handle can send an
    HTTP 431 (Request Header Fields Too Large) status code. A client can discard responses
    that it cannot process. The header block MUST be processed to ensure a consistent
    connection state, unless the connection is closed.
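    On the client side, the Transport added in this change advertises
    SETTINGS_MAX_HEADER_LIST_SIZE from its MaxHeaderListSize field (zero selects a 10MB
    default, per transport.go below). A sketch of setting it:

    package main

    import (
        "net/http"

        "golang.org/x/net/http2"
    )

    func newClient() *http.Client {
        return &http.Client{Transport: &http2.Transport{
            // Advertised as SETTINGS_MAX_HEADER_LIST_SIZE; responses whose header
            // blocks exceed roughly 1MB will be rejected rather than buffered.
            MaxHeaderListSize: 1 << 20,
        }}
    }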
    +
    + +
    + + HTTP/2 enables greater use of compression for both header fields () and entity bodies. Compression can allow an attacker to recover + secret data when it is compressed in the same context as data under attacker control. + + + There are demonstrable attacks on compression that exploit the characteristics of the web + (e.g., ). The attacker induces multiple requests containing + varying plaintext, observing the length of the resulting ciphertext in each, which + reveals a shorter length when a guess about the secret is correct. + + + Implementations communicating on a secure channel MUST NOT compress content that includes + both confidential and attacker-controlled data unless separate compression dictionaries + are used for each source of data. Compression MUST NOT be used if the source of data + cannot be reliably determined. Generic stream compression, such as that provided by TLS + MUST NOT be used with HTTP/2 (). + + + Further considerations regarding the compression of header fields are described in . + +
    + +
    + + Padding within HTTP/2 is not intended as a replacement for general purpose padding, such + as might be provided by TLS. Redundant padding could even be + counterproductive. Correct application can depend on having specific knowledge of the + data that is being padded. + + + To mitigate attacks that rely on compression, disabling or limiting compression might be + preferable to padding as a countermeasure. + + + Padding can be used to obscure the exact size of frame content, and is provided to + mitigate specific attacks within HTTP. For example, attacks where compressed content + includes both attacker-controlled plaintext and secret data (see for example, ). + + + Use of padding can result in less protection than might seem immediately obvious. At + best, padding only makes it more difficult for an attacker to infer length information by + increasing the number of frames an attacker has to observe. Incorrectly implemented + padding schemes can be easily defeated. In particular, randomized padding with a + predictable distribution provides very little protection; similarly, padding payloads to a + fixed size exposes information as payload sizes cross the fixed size boundary, which could + be possible if an attacker can control plaintext. + + + Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding + for HEADERS and PUSH_PROMISE frames. A valid reason for an + intermediary to change the amount of padding of frames is to improve the protections that + padding provides. + +
    + +
    + + Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions + of a single client or server over time. This includes the value of settings, the manner + in which flow control windows are managed, the way priorities are allocated to streams, + timing of reactions to stimulus, and handling of any optional features. + + + As far as this creates observable differences in behavior, they could be used as a basis + for fingerprinting a specific client, as defined in . + +
    +
    + +
    + + A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation + (ALPN) Protocol IDs" registry established in . + + + This document establishes a registry for frame types, settings, and error codes. These new + registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section. + + + This document registers the HTTP2-Settings header field for + use in HTTP; and the 421 (Misdirected Request) status code. + + + This document registers the PRI method for use in HTTP, to avoid + collisions with the connection preface. + + +
    This document creates two registrations for the identification of HTTP/2 in the
    "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in .

    The "h2" string identifies HTTP/2 when used over TLS:

      Protocol:                HTTP/2 over TLS
      Identification Sequence: 0x68 0x32 ("h2")
      Specification:           This document

    The "h2c" string identifies HTTP/2 when used over cleartext TCP:

      Protocol:                HTTP/2 over TCP
      Identification Sequence: 0x68 0x32 0x63 ("h2c")
      Specification:           This document
    + +
    This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame
    Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under
    either of the "IETF Review" or "IESG Approval" policies for values between 0x00 and
    0xef, with values between 0xf0 and 0xff being reserved for experimental use.

    New entries in this registry require the following information:

      Frame Type: A name or label for the frame type.

      Code: The 8-bit code assigned to the frame type.

      Specification: A reference to a specification that includes a description of the
      frame layout, its semantics, and the flags that the frame type uses, including any
      parts of the frame that are conditionally present based on the value of flags.

    The entries in the following table are registered by this document.

      Frame Type     Code
      -------------  ----
      DATA           0x0
      HEADERS        0x1
      PRIORITY       0x2
      RST_STREAM     0x3
      SETTINGS       0x4
      PUSH_PROMISE   0x5
      PING           0x6
      GOAWAY         0x7
      WINDOW_UPDATE  0x8
      CONTINUATION   0x9
    + +
    This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings"
    registry manages a 16-bit space. The "HTTP/2 Settings" registry operates under the
    "Expert Review" policy for values in the range from 0x0000 to 0xefff, with values
    between 0xf000 and 0xffff being reserved for experimental use.

    New registrations are advised to provide the following information:

      Name: A symbolic name for the setting. Specifying a setting name is optional.

      Code: The 16-bit code assigned to the setting.

      Initial Value: An initial value for the setting.

      Specification: An optional reference to a specification that describes the use of
      the setting.

    An initial set of setting registrations can be found in .

      Name                    Code  Initial Value
      ----------------------  ----  -------------
      HEADER_TABLE_SIZE       0x1   4096
      ENABLE_PUSH             0x2   1
      MAX_CONCURRENT_STREAMS  0x3   (infinite)
      INITIAL_WINDOW_SIZE     0x4   65535
      MAX_FRAME_SIZE          0x5   16384
      MAX_HEADER_LIST_SIZE    0x6   (infinite)
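    These identifiers correspond to the http2.Setting values used by the vendored package;
    the Transport later in this diff builds its initialSettings the same way. An
    illustrative sketch of writing a SETTINGS frame (the values are arbitrary):

    package main

    import "golang.org/x/net/http2"

    // writeInitialSettings advertises a few of the settings from the table above.
    func writeInitialSettings(fr *http2.Framer) error {
        return fr.WriteSettings(
            http2.Setting{ID: http2.SettingEnablePush, Val: 0},
            http2.Setting{ID: http2.SettingInitialWindowSize, Val: 4 << 20},
            http2.Setting{ID: http2.SettingMaxConcurrentStreams, Val: 100},
        )
    }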
    + +
    This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code"
    registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the
    "Expert Review" policy.

    Registrations for error codes are required to include a description of the error code.
    An expert reviewer is advised to examine new registrations for possible duplication
    with existing error codes. Use of existing registrations is to be encouraged, but not
    mandated.

    New registrations are advised to provide the following information:

      Name: A name for the error code. Specifying an error code name is optional.

      Code: The 32-bit error code value.

      Description: A brief description of the error code semantics, longer if no detailed
      specification is provided.

      Specification: An optional reference for a specification that defines the error
      code.

    The entries in the following table are registered by this document.

      Name                 Code  Description
      -------------------  ----  ----------------------------------------
      NO_ERROR             0x0   Graceful shutdown
      PROTOCOL_ERROR       0x1   Protocol error detected
      INTERNAL_ERROR       0x2   Implementation fault
      FLOW_CONTROL_ERROR   0x3   Flow control limits exceeded
      SETTINGS_TIMEOUT     0x4   Settings not acknowledged
      STREAM_CLOSED        0x5   Frame received for closed stream
      FRAME_SIZE_ERROR     0x6   Frame size incorrect
      REFUSED_STREAM       0x7   Stream not processed
      CANCEL               0x8   Stream cancelled
      COMPRESSION_ERROR    0x9   Compression state not updated
      CONNECT_ERROR        0xa   TCP connection error for CONNECT method
      ENHANCE_YOUR_CALM    0xb   Processing capacity exceeded
      INADEQUATE_SECURITY  0xc   Negotiated TLS parameters not acceptable
    + +
    + + This section registers the HTTP2-Settings header field in the + Permanent Message Header Field Registry. + + + HTTP2-Settings + + + http + + + standard + + + IETF + + + of this document + + + This header field is only used by an HTTP/2 client for Upgrade-based negotiation. + + + +
    + +
    + + This section registers the PRI method in the HTTP Method + Registry (). + + + PRI + + + No + + + No + + + of this document + + + This method is never used by an actual client. This method will appear to be used + when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection + preface. + + + +
    + +
    + + This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext + Transfer Protocol (HTTP) Status Code Registry (). + + + + + 421 + + + Misdirected Request + + + of this document + + + +
    + +
    + +
    + + This document includes substantial input from the following individuals: + + + Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin + Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin + Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY + contributors). + + + Gabriel Montenegro and Willy Tarreau (Upgrade mechanism). + + + William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto + Peon, Rob Trace (Flow control). + + + Mike Bishop (Extensibility). + + + Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan + (Substantial editorial contributions). + + + Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp. + + + Alexey Melnikov was an editor of this document during 2013. + + + A substantial proportion of Martin's contribution was supported by Microsoft during his + employment there. + + + +
    +
    + + + + + + HPACK - Header Compression for HTTP/2 + + + + + + + + + + + + Transmission Control Protocol + + + University of Southern California (USC)/Information Sciences + Institute + + + + + + + + + + + Key words for use in RFCs to Indicate Requirement Levels + + + Harvard University +
    sob@harvard.edu
    +
    + +
    + + +
    + + + + + HTTP Over TLS + + + + + + + + + + Uniform Resource Identifier (URI): Generic + Syntax + + + + + + + + + + + + The Base16, Base32, and Base64 Data Encodings + + + + + + + + + Guidelines for Writing an IANA Considerations Section in RFCs + + + + + + + + + + + Augmented BNF for Syntax Specifications: ABNF + + + + + + + + + + + The Transport Layer Security (TLS) Protocol Version 1.2 + + + + + + + + + + + Transport Layer Security (TLS) Extensions: Extension Definitions + + + + + + + + + + Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension + + + + + + + + + + + + + TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois + Counter Mode (GCM) + + + + + + + + + + + Digital Signature Standard (DSS) + + NIST + + + + + + + + + Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + + +
    + + + + Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + + +
    + + + Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + +
    + + + Hypertext Transfer Protocol (HTTP/1.1): Range Requests + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + World Wide Web Consortium +
    ylafon@w3.org
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + +
    + + + Hypertext Transfer Protocol (HTTP/1.1): Caching + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + Akamai +
    mnot@mnot.net
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + + +
    + + + Hypertext Transfer Protocol (HTTP/1.1): Authentication + + Adobe Systems Incorporated +
    fielding@gbiv.com
    +
    + + greenbytes GmbH +
    julian.reschke@greenbytes.de
    +
    + +
    + + +
    + + + + HTTP State Management Mechanism + + + + + +
    + + + + + + TCP Extensions for High Performance + + + + + + + + + + + + Transport Layer Security Protocol Compression Methods + + + + + + + + + Additional HTTP Status Codes + + + + + + + + + + + Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS) + + + + + + + + + + + + + + + AES Galois Counter Mode (GCM) Cipher Suites for TLS + + + + + + + + + + + + HTML5 + + + + + + + + + + + Latest version available at + . + + + + + + + Talking to Yourself for Fun and Profit + + + + + + + + + + + + + + BREACH: Reviving the CRIME Attack + + + + + + + + + + + Registration Procedures for Message Header Fields + + Nine by Nine +
    GK-IETF@ninebynine.org
    +
    + + BEA Systems +
    mnot@pobox.com
    +
    + + HP Labs +
    JeffMogul@acm.org
    +
    + +
    + + +
    + + + + Recommendations for Secure Use of TLS and DTLS + + + + + + + + + + + + + + + + + + HTTP Alternative Services + + + Akamai + + + Mozilla + + + greenbytes + + + + + + +
    + +
    + + This section is to be removed by RFC Editor before publication. + + +
    + + Renamed Not Authoritative status code to Misdirected Request. + +
    + +
    + + Pseudo-header fields are now required to appear strictly before regular ones. + + + Restored 1xx series status codes, except 101. + + + Changed frame length field 24-bits. Expanded frame header to 9 octets. Added a setting + to limit the damage. + + + Added a setting to advise peers of header set size limits. + + + Removed segments. + + + Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping. + +
    + +
    + + Restored extensibility options. + + + Restricting TLS cipher suites to AEAD only. + + + Removing Content-Encoding requirements. + + + Permitting the use of PRIORITY after stream close. + + + Removed ALTSVC frame. + + + Removed BLOCKED frame. + + + Reducing the maximum padding size to 256 octets; removing padding from + CONTINUATION frames. + + + Removed per-frame GZIP compression. + +
    + +
    + + Added BLOCKED frame (at risk). + + + Simplified priority scheme. + + + Added DATA per-frame GZIP compression. + +
    + +
    + + Changed "connection header" to "connection preface" to avoid confusion. + + + Added dependency-based stream prioritization. + + + Added "h2c" identifier to distinguish between cleartext and secured HTTP/2. + + + Adding missing padding to PUSH_PROMISE. + + + Integrate ALTSVC frame and supporting text. + + + Dropping requirement on "deflate" Content-Encoding. + + + Improving security considerations around use of compression. + +
    + +
    + + Adding padding for data frames. + + + Renumbering frame types, error codes, and settings. + + + Adding INADEQUATE_SECURITY error code. + + + Updating TLS usage requirements to 1.2; forbidding TLS compression. + + + Removing extensibility for frames and settings. + + + Changing setting identifier size. + + + Removing the ability to disable flow control. + + + Changing the protocol identification token to "h2". + + + Changing the use of :authority to make it optional and to allow userinfo in non-HTTP + cases. + + + Allowing split on 0x0 for Cookie. + + + Reserved PRI method in HTTP/1.1 to avoid possible future collisions. + +
    + +
    + + Added cookie crumbling for more efficient header compression. + + + Added header field ordering with the value-concatenation mechanism. + +
    + +
    + + Marked draft for implementation. + +
    + +
    + + Adding definition for CONNECT method. + + + Constraining the use of push to safe, cacheable methods with no request body. + + + Changing from :host to :authority to remove any potential confusion. + + + Adding setting for header compression table size. + + + Adding settings acknowledgement. + + + Removing unnecessary and potentially problematic flags from CONTINUATION. + + + Added denial of service considerations. + +
    +
    + + Marking the draft ready for implementation. + + + Renumbering END_PUSH_PROMISE flag. + + + Editorial clarifications and changes. + +
    + +
    + + Added CONTINUATION frame for HEADERS and PUSH_PROMISE. + + + PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is + zero. + + + Push expanded to allow all safe methods without a request body. + + + Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1 + hop-by-hop header fields. + + + Requiring that intermediaries not forward requests with missing or illegal routing + :-headers. + + + Clarified requirements around handling different frames after stream close, stream reset + and GOAWAY. + + + Added more specific prohibitions for sending of different frame types in various stream + states. + + + Making the last received setting value the effective value. + + + Clarified requirements on TLS version, extension and ciphers. + +
    + +
    + + Committed major restructuring atrocities. + + + Added reference to first header compression draft. + + + Added more formal description of frame lifecycle. + + + Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA. + + + Removed HEADERS+PRIORITY, added optional priority to HEADERS frame. + + + Added PRIORITY frame. + +
    + +
    + + Added continuations to frames carrying header blocks. + + + Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful + concepts, like cookies. + + + Removed "message". + + + Switched to TLS ALPN from NPN. + + + Editorial changes. + +
    + +
    + + Added IANA considerations section for frame types, error codes and settings. + + + Removed data frame compression. + + + Added PUSH_PROMISE. + + + Added globally applicable flags to framing. + + + Removed zlib-based header compression mechanism. + + + Updated references. + + + Clarified stream identifier reuse. + + + Removed CREDENTIALS frame and associated mechanisms. + + + Added advice against naive implementation of flow control. + + + Added session header section. + + + Restructured frame header. Removed distinction between data and control frames. + + + Altered flow control properties to include session-level limits. + + + Added note on cacheability of pushed resources and multiple tenant servers. + + + Changed protocol label form based on discussions. + +
    + +
    + + Changed title throughout. + + + Removed section on Incompatibilities with SPDY draft#2. + + + Changed INTERNAL_ERROR on GOAWAY to have a value of 2 . + + + Replaced abstract and introduction. + + + Added section on starting HTTP/2.0, including upgrade mechanism. + + + Removed unused references. + + + Added flow control principles based on . + +
    + +
    + + Adopted as base for draft-ietf-httpbis-http2. + + + Updated authors/editors list. + + + Added status note. + +
    +
    + +
    +
    + diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 000000000..e6b321f4b --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport.go @@ -0,0 +1,2303 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. + +package http2 + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "math" + mathrand "math/rand" + "net" + "net/http" + "sort" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" + "golang.org/x/net/lex/httplex" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. + transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" +) + +// Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. +type Transport struct { + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // AllowHTTP, if true, permits HTTP/2 requests using the insecure, + // plain-text "http" scheme. Note that this does not enable h2c support. + AllowHTTP bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allowed. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an ulimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. + MaxHeaderListSize uint32 + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). 
+ t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. +// It requires Go 1.6 or later and returns an error if the net/http package is too old +// or if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + return err +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. +type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + singleUse bool // whether being used for a single http.Request + + // readLoop goroutine fields: + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + + idleTimeout time.Duration // or 0 for never + idleTimer *time.Timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closed bool + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + bw *bufio.Writer + br *bufio.Reader + fr *Framer + lastActive time.Time + // Settings from peer: (also guarded by mu) + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + initialWindowSize uint32 + + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred +} + +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. 
+type clientStream struct { + cc *ClientConn + req *http.Request + trace *clientTrace // or nil + ID uint32 + resc chan resAndError + bufPipe pipe // buffered pipe with the flow-controlled response payload + startedWrite bool // started request body write; guarded by cc.mu + requestedGzip bool + on100 func() // optional code to run if get a 100 continue response + + flow flow // guarded by cc.mu + inflow flow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu + + peerReset chan struct{} // closed on peer reset + resetErr error // populated before peerReset is closed + + done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + + // owned by clientConnReadLoop: + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +// awaitRequestCancel waits for the user to cancel a request or for the done +// channel to be signaled. A non-nil error is returned only if the request was +// canceled. +func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { + ctx := reqContext(req) + if req.Cancel == nil && ctx.Done() == nil { + return nil + } + select { + case <-req.Cancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-done: + return nil + } +} + +// awaitRequestCancel waits for the user to cancel a request, its context to +// expire, or for the request to be done (any way it might be removed from the +// cc.streams map: peer reset, successful completion, TCP connection breakage, +// etc). If the request is canceled, then cs will be canceled and closed. +func (cs *clientStream) awaitRequestCancel(req *http.Request) { + if err := awaitRequestCancel(req, cs.done); err != nil { + cs.cancelStream() + cs.bufPipe.CloseWithError(err) + } +} + +func (cs *clientStream) cancelStream() { + cc := cs.cc + cc.mu.Lock() + didReset := cs.didReset + cs.didReset = true + cc.mu.Unlock() + + if !didReset { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + cc.forgetStreamID(cs.ID) + } +} + +// checkResetOrDone reports any error sent in a RST_STREAM frame by the +// server, or errStreamClosed if the stream is complete. 
+func (cs *clientStream) checkResetOrDone() error { + select { + case <-cs.peerReset: + return cs.resetErr + case <-cs.done: + return errStreamClosed + default: + return nil + } +} + +func (cs *clientStream) getStartedWrite() bool { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return cs.startedWrite +} + +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } + cc := cs.cc + cc.mu.Lock() + cs.stopReqBody = err + cc.cond.Broadcast() + cc.mu.Unlock() +} + +type stickyErrWriter struct { + w io.Writer + err *error +} + +func (sew stickyErrWriter) Write(p []byte) (n int, err error) { + if *sew.err != nil { + return 0, *sew.err + } + n, err = sew.w.Write(p) + *sew.err = err + return +} + +// noCachedConnError is the concrete type of ErrNoCachedConn, which +// needs to be detected by net/http regardless of whether it's its +// bundled version (in h2_bundle.go with a rewritten type name) or +// from a user's x/net/http2. As such, as it has a unique method name +// (IsHTTP2NoCachedConnError) that net/http sniffs for via func +// isNoCachedConnError. +type noCachedConnError struct{} + +func (noCachedConnError) IsHTTP2NoCachedConnError() {} +func (noCachedConnError) Error() string { return "http2: no cached connection was available" } + +// isNoCachedConnError reports whether err is of type noCachedConnError +// or its equivalent renamed type in net/http2's h2_bundle.go. Both types +// may coexist in the same running program. +func isNoCachedConnError(err error) bool { + _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) + return ok +} + +var ErrNoCachedConn error = noCachedConnError{} + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. + OnlyCachedConn bool +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func authorityAddr(scheme string, authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) +} + +// RoundTripOpt is like RoundTrip, but takes options. +func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + return nil, errors.New("http2: unsupported scheme") + } + + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + for retry := 0; ; retry++ { + cc, err := t.connPool().GetClientConn(req, addr) + if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) + return nil, err + } + traceGotConn(req, cc) + res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) + if err != nil && retry <= 6 { + if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { + // After the first retry, do exponential backoff with 10% jitter. 
+ if retry == 0 { + continue + } + backoff := float64(uint(1) << (uint(retry) - 1)) + backoff += backoff * (0.1 * mathrand.Float64()) + select { + case <-time.After(time.Second * time.Duration(backoff)): + continue + case <-reqContext(req).Done(): + return nil, reqContext(req).Err() + } + } + } + if err != nil { + t.vlogf("RoundTrip failure: %v", err) + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. +func (t *Transport) CloseIdleConnections() { + if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { + cp.closeIdleConnections() + } +} + +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") +) + +// shouldRetryRequest is called by RoundTrip when a request fails to get +// response headers. It is always called with a non-nil error. +// It returns either a request to retry (either the same request, or a +// modified clone), or an error if the request can't be replayed. +func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { + if !canRetryError(err) { + return nil, err + } + if !afterBodyWrite { + return req, nil + } + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return req, nil + } + // Otherwise we depend on the Request having its GetBody + // func defined. + getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody + if getBody == nil { + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) + } + body, err := getBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil +} + +func canRetryError(err error) bool { + if err == errClientConnUnusable || err == errClientConnGotGoAway { + return true + } + if se, ok := err.(StreamError); ok { + return se.Code == ErrCodeRefusedStream + } + return false +} + +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.newClientConn(tconn, singleUse) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *cloneTLSConfig(t.TLSClientConfig) + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) 
+ } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return transportExpectContinueTimeout(t.t1) +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, false) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + + cc.bw.Write(clientPreface) + cc.fr.WriteSettings(initialSettings...) 
+ cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
+ cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
+ cc.bw.Flush()
+ if cc.werr != nil {
+ return nil, cc.werr
+ }
+
+ go cc.readLoop()
+ return cc, nil
+}
+
+func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ old := cc.goAway
+ cc.goAway = f
+
+ // Merge the previous and current GoAway error frames.
+ if cc.goAwayDebug == "" {
+ cc.goAwayDebug = string(f.DebugData())
+ }
+ if old != nil && old.ErrCode != ErrCodeNo {
+ cc.goAway.ErrCode = old.ErrCode
+ }
+ last := f.LastStreamID
+ for streamID, cs := range cc.streams {
+ if streamID > last {
+ select {
+ case cs.resc <- resAndError{err: errClientConnGotGoAway}:
+ default:
+ }
+ }
+ }
+}
+
+// CanTakeNewRequest reports whether the connection can take a new request,
+// meaning it has not been closed or received or sent a GOAWAY.
+func (cc *ClientConn) CanTakeNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.canTakeNewRequestLocked()
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+ if cc.singleUse && cc.nextStreamID > 1 {
+ return false
+ }
+ return cc.goAway == nil && !cc.closed &&
+ int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
+}
+
+// onIdleTimeout is called from a time.AfterFunc goroutine. It will
+// only be called when we're idle, but because we're coming from a new
+// goroutine, there could be a new request coming in at the same time,
+// so this simply calls the synchronized closeIfIdle to shut down this
+// connection. The timer could just call closeIfIdle, but this is more
+// clear.
+func (cc *ClientConn) onIdleTimeout() {
+ cc.closeIfIdle()
+}
+
+func (cc *ClientConn) closeIfIdle() {
+ cc.mu.Lock()
+ if len(cc.streams) > 0 {
+ cc.mu.Unlock()
+ return
+ }
+ cc.closed = true
+ nextID := cc.nextStreamID
+ // TODO: do clients send GOAWAY too? maybe? Just Close:
+ cc.mu.Unlock()
+
+ if VerboseLogs {
+ cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
+ }
+ cc.tconn.Close()
+}
+
+const maxAllocFrameSize = 512 << 10
+
+// frameScratchBuffer returns a scratch buffer suitable for writing DATA frames.
+// They're capped at the min of the peer's max frame size or 512KB
+// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
+// buffers.
+func (cc *ClientConn) frameScratchBuffer() []byte {
+ cc.mu.Lock()
+ size := cc.maxFrameSize
+ if size > maxAllocFrameSize {
+ size = maxAllocFrameSize
+ }
+ for i, buf := range cc.freeBuf {
+ if len(buf) >= int(size) {
+ cc.freeBuf[i] = nil
+ cc.mu.Unlock()
+ return buf[:size]
+ }
+ }
+ cc.mu.Unlock()
+ return make([]byte, size)
+}
+
+func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
+ if len(cc.freeBuf) < maxBufs {
+ cc.freeBuf = append(cc.freeBuf, buf)
+ return
+ }
+ for i, old := range cc.freeBuf {
+ if old == nil {
+ cc.freeBuf[i] = buf
+ return
+ }
+ }
+ // forget about it.
+}
+
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var errRequestCanceled = errors.New("net/http: request canceled") + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need. Request.Cancel this is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. +func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) + } + if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) + } + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { + return fmt.Errorf("http2: invalid Connection request header: %q", vv) + } + return nil +} + +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. +func actualContentLength(req *http.Request) int64 { + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + resp, _, err := cc.roundTrip(req) + return resp, err +} + +func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { + if err := checkConnHeaders(req); err != nil { + return nil, false, err + } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, false, err + } + hasTrailers := trailers != "" + + cc.mu.Lock() + if err := cc.awaitOpenSlotForRequest(req); err != nil { + cc.mu.Unlock() + return nil, false, err + } + + body := req.Body + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + var requestedGzip bool + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. 
See https://golang.org/issue/8923 + requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, false, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = requestTrace(req) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) + cc.wmu.Unlock() + traceWroteHeaders(cs.trace) + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) + return nil, false, werr + } + + var respHeaderTimer <-chan time.Time + if hasBody { + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + bodyWritten := false + ctx := reqContext(req) + + handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, false, nil + } + + for { + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + case <-respHeaderTimer: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errTimeout + case <-ctx.Done(): + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), ctx.Err() + case <-req.Cancel: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. + return nil, cs.getStartedWrite(), cs.resetErr + case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. 
+ select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } + if err != nil { + return nil, cs.getStartedWrite(), err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. +// Must hold cc.mu. +func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { + var waitingForConn chan struct{} + var waitingForConnErr error // guarded by cc.mu + for { + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + return errClientConnUnusable + } + if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { + if waitingForConn != nil { + close(waitingForConn) + } + return nil + } + // Unfortunately, we cannot wait on a condition variable and channel at + // the same time, so instead, we spin up a goroutine to check if the + // request is canceled while we wait for a slot to open in the connection. + if waitingForConn == nil { + waitingForConn = make(chan struct{}) + go func() { + if err := awaitRequestCancel(req, waitingForConn); err != nil { + cc.mu.Lock() + waitingForConnErr = err + cc.cond.Broadcast() + cc.mu.Unlock() + } + }() + } + cc.pendingRequests++ + cc.cond.Wait() + cc.pendingRequests-- + if waitingForConnErr != nil { + return waitingForConnErr + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + for len(hdrs) > 0 && cc.werr == nil { + chunk := hdrs + if len(chunk) > maxFrameSize { + chunk = chunk[:maxFrameSize] + } + hdrs = hdrs[len(chunk):] + endHeaders := len(hdrs) == 0 + if first { + cc.fr.WriteHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: chunk, + EndStream: endStream, + EndHeaders: endHeaders, + }) + first = false + } else { + cc.fr.WriteContinuation(streamID, endHeaders, chunk) + } + } + // TODO(bradfitz): this Flush could potentially block (as + // could the WriteHeaders call(s) above), which means they + // wouldn't respond to Request.Cancel being readable. That's + // rare, but this should probably be in a goroutine. + cc.bw.Flush() + return cc.werr +} + +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. 
+ errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+)
+
+func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
+ cc := cs.cc
+ sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
+ buf := cc.frameScratchBuffer()
+ defer cc.putFrameScratchBuffer(buf)
+
+ defer func() {
+ traceWroteRequest(cs.trace, err)
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
+ cerr := bodyCloser.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ req := cs.req
+ hasTrailers := req.Trailer != nil
+
+ var sawEOF bool
+ for !sawEOF {
+ n, err := body.Read(buf)
+ if err == io.EOF {
+ sawEOF = true
+ err = nil
+ } else if err != nil {
+ return err
+ }
+
+ remain := buf[:n]
+ for len(remain) > 0 && err == nil {
+ var allowed int32
+ allowed, err = cs.awaitFlowControl(len(remain))
+ switch {
+ case err == errStopReqBodyWrite:
+ return err
+ case err == errStopReqBodyWriteAndCancel:
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ return err
+ case err != nil:
+ return err
+ }
+ cc.wmu.Lock()
+ data := remain[:allowed]
+ remain = remain[allowed:]
+ sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+ err = cc.fr.WriteData(cs.ID, sentEnd, data)
+ if err == nil {
+ // TODO(bradfitz): this flush is for latency, not bandwidth.
+ // Most requests won't need this. Make this opt-in or
+ // opt-out? Use some heuristic on the body type? Nagle-like
+ // timers? Based on 'n'? Only last chunk of this for loop,
+ // unless flow control tokens are low? For now, always.
+ // If we change this, see comment below.
+ err = cc.bw.Flush()
+ }
+ cc.wmu.Unlock()
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ if sentEnd {
+ // Already sent END_STREAM (which implies we have no
+ // trailers) and flushed, because currently all
+ // WriteData frames above get a flush. So we're done.
+ return nil
+ }
+
+ var trls []byte
+ if hasTrailers {
+ cc.mu.Lock()
+ trls, err = cc.encodeTrailers(req)
+ cc.mu.Unlock()
+ if err != nil {
+ cc.writeStreamReset(cs.ID, ErrCodeInternal, err)
+ cc.forgetStreamID(cs.ID)
+ return err
+ }
+ }
+
+ cc.mu.Lock()
+ maxFrameSize := int(cc.maxFrameSize)
+ cc.mu.Unlock()
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ // Two ways to send END_STREAM: either with trailers, or
+ // with an empty DATA frame.
+ if len(trls) > 0 {
+ err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls)
+ } else {
+ err = cc.fr.WriteData(cs.ID, true, nil)
+ }
+ if ferr := cc.bw.Flush(); ferr != nil && err == nil {
+ err = ferr
+ }
+ return err
+}
+
+// awaitFlowControl waits for [1, min(maxBytes, cc.maxFrameSize)] flow
+// control tokens from the server.
+// It returns either the non-zero number of tokens taken or an error
+// if the stream is dead.
+func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkResetOrDone(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// requires cc.mu be held. +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httplex.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httplex.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + f(":method", req.Method) + if req.Method != "CONNECT" { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). 
+ didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + cc.writeHeader(strings.ToLower(name), value) + }) + + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { + cc.hbuf.Reset() + + hlSize := uint64(0) + for k, vv := range req.Trailer { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. have already been filtered at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes(), nil +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + res *http.Response + err error +} + +// requires cc.mu be held. 
+func (cc *ClientConn) newStream() *clientStream { + cs := &clientStream{ + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), + } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() + delete(cc.streams, id) + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + } + close(cs.done) + // Wake up checkResetOrDone via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() + } + return cs +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{cc: cc} + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) + defer close(cc.readerDone) + + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. 
+ err := cc.readerErr + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF + } + for _, cs := range cc.streams { + cs.bufPipe.CloseWithError(err) // no-op if already closed + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a HEADERS reply + gotSettings := false + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, false); cs != nil { + cs.cc.writeStreamReset(cs.ID, se.Code, err) + cs.cc.forgetStreamID(cs.ID) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle { + cc.closeIfIdle() + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if f.StreamEnded() { + // Issue 20521: If the stream has ended, streamByID() causes + // clientStream.done to be closed, which causes the request's bodyWriter + // to be closed with an errStreamClosed, which may be received by + // clientConn.RoundTrip before the result of processing these headers. + // Deferring stream closure allows the header processing to occur first. + // clientConn.RoundTrip may still receive the bodyWriter error first, but + // the fix for issue 16102 prioritises any response. + // + // Issue 22413: If there is no request body, we should close the + // stream before writing to cs.resc so that the stream is closed + // immediately once RoundTrip returns. 
+ if cs.req.Body != nil { + defer cc.forgetStreamID(f.StreamID) + } else { + cc.forgetStreamID(f.StreamID) + } + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cc.forgetStreamID(cs.ID) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("malformed response from server: missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") + } + + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. 
+ } + } + + if streamEnded || isHead { + res.Body = noBody + return res, nil + } + + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + setResponseUncompressed(res) + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. +type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. 
+ v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true + } + // Return connection-level flow control. + if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() + } + + cs.bufPipe.BreakWithError(errClosedResponseBody) + cc.forgetStreamID(cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. + + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } + return nil + } + if !cs.firstByte { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + if f.Length > 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + // Return any padded flow control now, since we won't + // refund it later on body reads. + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. 
+ didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } + cc.mu.Unlock() + + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } + cs.bufPipe.closeWithErrorAndCode(err, code) + + select { + case cs.resc <- resAndError{err: err}: + default: + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) + case SettingInitialWindowSize: + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } + + // Adjust flow control of currently-open + // frames by the difference of the old initial + // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
+ cc.vlogf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+ return cc.werr
+}
+
+func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, false)
+ if f.StreamID != 0 && cs == nil {
+ return nil
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ fl := &cc.flow
+ if cs != nil {
+ fl = &cs.flow
+ }
+ if !fl.add(int32(f.Increment)) {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ cc.cond.Broadcast()
+ return nil
+}
+
+func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
+ cs := rl.cc.streamByID(f.StreamID, true)
+ if cs == nil {
+ // TODO: return error if server tries to RST_STREAM an idle stream
+ return nil
+ }
+ select {
+ case <-cs.peerReset:
+ // Already reset.
+ // This is the only goroutine
+ // which closes this, so there
+ // isn't a race.
+ default:
+ err := streamError(cs.ID, f.ErrCode)
+ cs.resetErr = err
+ close(cs.peerReset)
+ cs.bufPipe.CloseWithError(err)
+ cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+ }
+ return nil
+}
+
+// Ping sends a PING frame to the server and waits for the ack.
+// Public implementation is in go17.go and not_go17.go
+func (cc *ClientConn) ping(ctx contextContext) error {
+ c := make(chan struct{})
+ // Generate a random payload
+ var p [8]byte
+ for {
+ if _, err := rand.Read(p[:]); err != nil {
+ return err
+ }
+ cc.mu.Lock()
+ // check for dup before insert
+ if _, found := cc.pings[p]; !found {
+ cc.pings[p] = c
+ cc.mu.Unlock()
+ break
+ }
+ cc.mu.Unlock()
+ }
+ cc.wmu.Lock()
+ if err := cc.fr.WritePing(false, p); err != nil {
+ cc.wmu.Unlock()
+ return err
+ }
+ if err := cc.bw.Flush(); err != nil {
+ cc.wmu.Unlock()
+ return err
+ }
+ cc.wmu.Unlock()
+ select {
+ case <-c:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cc.readerDone:
+ // connection closed
+ return cc.readerErr
+ }
+}
+
+func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
+ if f.IsAck() {
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ // If ack, notify listener if any
+ if c, ok := cc.pings[f.Data]; ok {
+ close(c)
+ delete(cc.pings, f.Data)
+ }
+ return nil
+ }
+ cc := rl.cc
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(true, f.Data); err != nil {
+ return err
+ }
+ return cc.bw.Flush()
+}
+
+func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
+ // We told the peer we don't want them.
+ // Spec says:
+ // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+ // setting of the peer endpoint is set to 0. An endpoint that
+ // has set this setting and has received acknowledgement MUST
+ // treat the receipt of a PUSH_PROMISE frame as a connection
+ // error (Section 5.4.1) of type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+}
+
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+ // TODO: map err to more interesting error codes, once the
+ // HTTP community comes up with some. But currently for
+ // RST_STREAM there's no equivalent to GOAWAY frame's debug
+ // data, and the error codes are all pretty vague ("cancel").
+ cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". +type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httplex.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. 
+ const hugeDuration = 365 * 24 * time.Hour + s.timer = time.AfterFunc(hugeDuration, func() { + s.fnonce.Do(s.fn) + }) + return +} + +func (s bodyWriterState) cancel() { + if s.timer != nil { + s.timer.Stop() + } +} + +func (s bodyWriterState) on100() { + if s.timer == nil { + // If we didn't do a delayed write, ignore the server's + // bogus 100 continue response. + return + } + s.timer.Stop() + go func() { s.fnonce.Do(s.fn) }() +} + +// scheduleBodyWrite starts writing the body, either immediately (in +// the common case) or after the delay timeout. It should not be +// called until after the headers have been written. +func (s bodyWriterState) scheduleBodyWrite() { + if s.timer == nil { + // We're not doing a delayed write (see + // getBodyWriterState), so just start the writing + // goroutine immediately. + go s.fn() + return + } + traceWait100Continue(s.cs.trace) + if s.timer.Stop() { + s.timer.Reset(s.delay) + } +} + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. +func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") +} diff --git a/vendor/golang.org/x/net/http2/transport_test.go b/vendor/golang.org/x/net/http2/transport_test.go new file mode 100644 index 000000000..fe04bd287 --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport_test.go @@ -0,0 +1,3847 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/http2/hpack" +) + +var ( + extNet = flag.Bool("extnet", false, "do external network tests") + transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport") + insecure = flag.Bool("insecure", false, "insecure TLS dials") // TODO: dead code. remove? 
+) + +var tlsConfigInsecure = &tls.Config{InsecureSkipVerify: true} + +type testContext struct{} + +func (testContext) Done() <-chan struct{} { return make(chan struct{}) } +func (testContext) Err() error { panic("should not be called") } +func (testContext) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } +func (testContext) Value(key interface{}) interface{} { return nil } + +func TestTransportExternal(t *testing.T) { + if !*extNet { + t.Skip("skipping external network test") + } + req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil) + rt := &Transport{TLSClientConfig: tlsConfigInsecure} + res, err := rt.RoundTrip(req) + if err != nil { + t.Fatalf("%v", err) + } + res.Write(os.Stdout) +} + +type fakeTLSConn struct { + net.Conn +} + +func (c *fakeTLSConn) ConnectionState() tls.ConnectionState { + return tls.ConnectionState{ + Version: tls.VersionTLS12, + CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + } +} + +func startH2cServer(t *testing.T) net.Listener { + h2Server := &Server{} + l := newLocalListener(t) + go func() { + conn, err := l.Accept() + if err != nil { + t.Error(err) + return + } + h2Server.ServeConn(&fakeTLSConn{conn}, &ServeConnOpts{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello, %v, http: %v", r.URL.Path, r.TLS == nil) + })}) + }() + return l +} + +func TestTransportH2c(t *testing.T) { + l := startH2cServer(t) + defer l.Close() + req, err := http.NewRequest("GET", "http://"+l.Addr().String()+"/foobar", nil) + if err != nil { + t.Fatal(err) + } + tr := &Transport{ + AllowHTTP: true, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + return net.Dial(network, addr) + }, + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if res.ProtoMajor != 2 { + t.Fatal("proto not h2c") + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if got, want := string(body), "Hello, /foobar, http: true"; got != want { + t.Fatalf("response got %v, want %v", got, want) + } +} + +func TestTransport(t *testing.T) { + const body = "sup" + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, body) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + t.Logf("Got res: %+v", res) + if g, w := res.StatusCode, 200; g != w { + t.Errorf("StatusCode = %v; want %v", g, w) + } + if g, w := res.Status, "200 OK"; g != w { + t.Errorf("Status = %q; want %q", g, w) + } + wantHeader := http.Header{ + "Content-Length": []string{"3"}, + "Content-Type": []string{"text/plain; charset=utf-8"}, + "Date": []string{"XXX"}, // see cleanDate + } + cleanDate(res) + if !reflect.DeepEqual(res.Header, wantHeader) { + t.Errorf("res Header = %v; want %v", res.Header, wantHeader) + } + if res.Request != req { + t.Errorf("Response.Request = %p; want %p", res.Request, req) + } + if res.TLS == nil { + t.Error("Response.TLS = nil; want non-nil") + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("Body read: %v", err) + } else if string(slurp) != body { + t.Errorf("Body = %q; want %q", slurp, body) + } +} + +func onSameConn(t *testing.T, modReq func(*http.Request)) bool { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) 
{ + io.WriteString(w, r.RemoteAddr) + }, optOnlyServer, func(c net.Conn, st http.ConnState) { + t.Logf("conn %v is now state %v", c.RemoteAddr(), st) + }) + defer st.Close() + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + get := func() string { + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + modReq(req) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("Body read: %v", err) + } + addr := strings.TrimSpace(string(slurp)) + if addr == "" { + t.Fatalf("didn't get an addr in response") + } + return addr + } + first := get() + second := get() + return first == second +} + +func TestTransportReusesConns(t *testing.T) { + if !onSameConn(t, func(*http.Request) {}) { + t.Errorf("first and second responses were on different connections") + } +} + +func TestTransportReusesConn_RequestClose(t *testing.T) { + if onSameConn(t, func(r *http.Request) { r.Close = true }) { + t.Errorf("first and second responses were not on different connections") + } +} + +func TestTransportReusesConn_ConnClose(t *testing.T) { + if onSameConn(t, func(r *http.Request) { r.Header.Set("Connection", "close") }) { + t.Errorf("first and second responses were not on different connections") + } +} + +// Tests that the Transport only keeps one pending dial open per destination address. +// https://golang.org/issue/13397 +func TestTransportGroupsPendingDials(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, r.RemoteAddr) + }, optOnlyServer) + defer st.Close() + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + } + defer tr.CloseIdleConnections() + var ( + mu sync.Mutex + dials = map[string]int{} + ) + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Error(err) + return + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Error(err) + return + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("Body read: %v", err) + } + addr := strings.TrimSpace(string(slurp)) + if addr == "" { + t.Errorf("didn't get an addr in response") + } + mu.Lock() + dials[addr]++ + mu.Unlock() + }() + } + wg.Wait() + if len(dials) != 1 { + t.Errorf("saw %d dials; want 1: %v", len(dials), dials) + } + tr.CloseIdleConnections() + if err := retry(50, 10*time.Millisecond, func() error { + cp, ok := tr.connPool().(*clientConnPool) + if !ok { + return fmt.Errorf("Conn pool is %T; want *clientConnPool", tr.connPool()) + } + cp.mu.Lock() + defer cp.mu.Unlock() + if len(cp.dialing) != 0 { + return fmt.Errorf("dialing map = %v; want empty", cp.dialing) + } + if len(cp.conns) != 0 { + return fmt.Errorf("conns = %v; want empty", cp.conns) + } + if len(cp.keys) != 0 { + return fmt.Errorf("keys = %v; want empty", cp.keys) + } + return nil + }); err != nil { + t.Errorf("State of pool after CloseIdleConnections: %v", err) + } +} + +func retry(tries int, delay time.Duration, fn func() error) error { + var err error + for i := 0; i < tries; i++ { + err = fn() + if err == nil { + return nil + } + time.Sleep(delay) + } + return err +} + +func TestTransportAbortClosesPipes(t *testing.T) { + shutdown := make(chan struct{}) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() + <-shutdown + }, + 
optOnlyServer, + ) + defer st.Close() + defer close(shutdown) // we must shutdown before st.Close() to avoid hanging + + done := make(chan struct{}) + requestMade := make(chan struct{}) + go func() { + defer close(done) + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + close(requestMade) + _, err = ioutil.ReadAll(res.Body) + if err == nil { + t.Error("expected error from res.Body.Read") + } + }() + + <-requestMade + // Now force the serve loop to end, via closing the connection. + st.closeConn() + // deadlock? that's a bug. + select { + case <-done: + case <-time.After(3 * time.Second): + t.Fatal("timeout") + } +} + +// TODO: merge this with TestTransportBody to make TestTransportRequest? This +// could be a table-driven test with extra goodies. +func TestTransportPath(t *testing.T) { + gotc := make(chan *url.URL, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + gotc <- r.URL + }, + optOnlyServer, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + const ( + path = "/testpath" + query = "q=1" + ) + surl := st.ts.URL + path + "?" + query + req, err := http.NewRequest("POST", surl, nil) + if err != nil { + t.Fatal(err) + } + c := &http.Client{Transport: tr} + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + got := <-gotc + if got.Path != path { + t.Errorf("Read Path = %q; want %q", got.Path, path) + } + if got.RawQuery != query { + t.Errorf("Read RawQuery = %q; want %q", got.RawQuery, query) + } +} + +func randString(n int) string { + rnd := rand.New(rand.NewSource(int64(n))) + b := make([]byte, n) + for i := range b { + b[i] = byte(rnd.Intn(256)) + } + return string(b) +} + +type panicReader struct{} + +func (panicReader) Read([]byte) (int, error) { panic("unexpected Read") } +func (panicReader) Close() error { panic("unexpected Close") } + +func TestActualContentLength(t *testing.T) { + tests := []struct { + req *http.Request + want int64 + }{ + // Verify we don't read from Body: + 0: { + req: &http.Request{Body: panicReader{}}, + want: -1, + }, + // nil Body means 0, regardless of ContentLength: + 1: { + req: &http.Request{Body: nil, ContentLength: 5}, + want: 0, + }, + // ContentLength is used if set. + 2: { + req: &http.Request{Body: panicReader{}, ContentLength: 5}, + want: 5, + }, + // http.NoBody means 0, not -1. 
+ 3: { + req: &http.Request{Body: go18httpNoBody()}, + want: 0, + }, + } + for i, tt := range tests { + got := actualContentLength(tt.req) + if got != tt.want { + t.Errorf("test[%d]: got %d; want %d", i, got, tt.want) + } + } +} + +func TestTransportBody(t *testing.T) { + bodyTests := []struct { + body string + noContentLen bool + }{ + {body: "some message"}, + {body: "some message", noContentLen: true}, + {body: strings.Repeat("a", 1<<20), noContentLen: true}, + {body: strings.Repeat("a", 1<<20)}, + {body: randString(16<<10 - 1)}, + {body: randString(16 << 10)}, + {body: randString(16<<10 + 1)}, + {body: randString(512<<10 - 1)}, + {body: randString(512 << 10)}, + {body: randString(512<<10 + 1)}, + {body: randString(1<<20 - 1)}, + {body: randString(1 << 20)}, + {body: randString(1<<20 + 2)}, + } + + type reqInfo struct { + req *http.Request + slurp []byte + err error + } + gotc := make(chan reqInfo, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + slurp, err := ioutil.ReadAll(r.Body) + if err != nil { + gotc <- reqInfo{err: err} + } else { + gotc <- reqInfo{req: r, slurp: slurp} + } + }, + optOnlyServer, + ) + defer st.Close() + + for i, tt := range bodyTests { + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + var body io.Reader = strings.NewReader(tt.body) + if tt.noContentLen { + body = struct{ io.Reader }{body} // just a Reader, hiding concrete type and other methods + } + req, err := http.NewRequest("POST", st.ts.URL, body) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + c := &http.Client{Transport: tr} + res, err := c.Do(req) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + defer res.Body.Close() + ri := <-gotc + if ri.err != nil { + t.Errorf("#%d: read error: %v", i, ri.err) + continue + } + if got := string(ri.slurp); got != tt.body { + t.Errorf("#%d: Read body mismatch.\n got: %q (len %d)\nwant: %q (len %d)", i, shortString(got), len(got), shortString(tt.body), len(tt.body)) + } + wantLen := int64(len(tt.body)) + if tt.noContentLen && tt.body != "" { + wantLen = -1 + } + if ri.req.ContentLength != wantLen { + t.Errorf("#%d. 
handler got ContentLength = %v; want %v", i, ri.req.ContentLength, wantLen) + } + } +} + +func shortString(v string) string { + const maxLen = 100 + if len(v) <= maxLen { + return v + } + return fmt.Sprintf("%v[...%d bytes omitted...]%v", v[:maxLen/2], len(v)-maxLen, v[len(v)-maxLen/2:]) +} + +func TestTransportDialTLS(t *testing.T) { + var mu sync.Mutex // guards following + var gotReq, didDial bool + + ts := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + gotReq = true + mu.Unlock() + }, + optOnlyServer, + ) + defer ts.Close() + tr := &Transport{ + DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) { + mu.Lock() + didDial = true + mu.Unlock() + cfg.InsecureSkipVerify = true + c, err := tls.Dial(netw, addr, cfg) + if err != nil { + return nil, err + } + return c, c.Handshake() + }, + } + defer tr.CloseIdleConnections() + client := &http.Client{Transport: tr} + res, err := client.Get(ts.ts.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + mu.Lock() + if !gotReq { + t.Error("didn't get request") + } + if !didDial { + t.Error("didn't use dial hook") + } +} + +func TestConfigureTransport(t *testing.T) { + t1 := &http.Transport{} + err := ConfigureTransport(t1) + if err == errTransportVersion { + t.Skip(err) + } + if err != nil { + t.Fatal(err) + } + if got := fmt.Sprintf("%#v", t1); !strings.Contains(got, `"h2"`) { + // Laziness, to avoid buildtags. + t.Errorf("stringification of HTTP/1 transport didn't contain \"h2\": %v", got) + } + wantNextProtos := []string{"h2", "http/1.1"} + if t1.TLSClientConfig == nil { + t.Errorf("nil t1.TLSClientConfig") + } else if !reflect.DeepEqual(t1.TLSClientConfig.NextProtos, wantNextProtos) { + t.Errorf("TLSClientConfig.NextProtos = %q; want %q", t1.TLSClientConfig.NextProtos, wantNextProtos) + } + if err := ConfigureTransport(t1); err == nil { + t.Error("unexpected success on second call to ConfigureTransport") + } + + // And does it work? 
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, r.Proto) + }, optOnlyServer) + defer st.Close() + + t1.TLSClientConfig.InsecureSkipVerify = true + c := &http.Client{Transport: t1} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if got, want := string(slurp), "HTTP/2.0"; got != want { + t.Errorf("body = %q; want %q", got, want) + } +} + +type capitalizeReader struct { + r io.Reader +} + +func (cr capitalizeReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + for i, b := range p[:n] { + if b >= 'a' && b <= 'z' { + p[i] = b - ('a' - 'A') + } + } + return +} + +type flushWriter struct { + w io.Writer +} + +func (fw flushWriter) Write(p []byte) (n int, err error) { + n, err = fw.w.Write(p) + if f, ok := fw.w.(http.Flusher); ok { + f.Flush() + } + return +} + +type clientTester struct { + t *testing.T + tr *Transport + sc, cc net.Conn // server and client conn + fr *Framer // server's framer + client func() error + server func() error +} + +func newClientTester(t *testing.T) *clientTester { + var dialOnce struct { + sync.Mutex + dialed bool + } + ct := &clientTester{ + t: t, + } + ct.tr = &Transport{ + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialOnce.Lock() + defer dialOnce.Unlock() + if dialOnce.dialed { + return nil, errors.New("only one dial allowed in test mode") + } + dialOnce.dialed = true + return ct.cc, nil + }, + } + + ln := newLocalListener(t) + cc, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + + } + sc, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + ln.Close() + ct.cc = cc + ct.sc = sc + ct.fr = NewFramer(sc, sc) + return ct +} + +func newLocalListener(t *testing.T) net.Listener { + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err == nil { + return ln + } + ln, err = net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + return ln +} + +func (ct *clientTester) greet(settings ...Setting) { + buf := make([]byte, len(ClientPreface)) + _, err := io.ReadFull(ct.sc, buf) + if err != nil { + ct.t.Fatalf("reading client preface: %v", err) + } + f, err := ct.fr.ReadFrame() + if err != nil { + ct.t.Fatalf("Reading client settings frame: %v", err) + } + if sf, ok := f.(*SettingsFrame); !ok { + ct.t.Fatalf("Wanted client settings frame; got %v", f) + _ = sf // stash it away? 
+ } + if err := ct.fr.WriteSettings(settings...); err != nil { + ct.t.Fatal(err) + } + if err := ct.fr.WriteSettingsAck(); err != nil { + ct.t.Fatal(err) + } +} + +func (ct *clientTester) readNonSettingsFrame() (Frame, error) { + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return nil, err + } + if _, ok := f.(*SettingsFrame); ok { + continue + } + return f, nil + } +} + +func (ct *clientTester) cleanup() { + ct.tr.CloseIdleConnections() +} + +func (ct *clientTester) run() { + errc := make(chan error, 2) + ct.start("client", errc, ct.client) + ct.start("server", errc, ct.server) + defer ct.cleanup() + for i := 0; i < 2; i++ { + if err := <-errc; err != nil { + ct.t.Error(err) + return + } + } +} + +func (ct *clientTester) start(which string, errc chan<- error, fn func() error) { + go func() { + finished := false + var err error + defer func() { + if !finished { + err = fmt.Errorf("%s goroutine didn't finish.", which) + } else if err != nil { + err = fmt.Errorf("%s: %v", which, err) + } + errc <- err + }() + err = fn() + finished = true + }() +} + +func (ct *clientTester) readFrame() (Frame, error) { + return readFrameTimeout(ct.fr, 2*time.Second) +} + +func (ct *clientTester) firstHeaders() (*HeadersFrame, error) { + for { + f, err := ct.readFrame() + if err != nil { + return nil, fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + hf, ok := f.(*HeadersFrame) + if !ok { + return nil, fmt.Errorf("Got %T; want HeadersFrame", f) + } + return hf, nil + } +} + +type countingReader struct { + n *int64 +} + +func (r countingReader) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = byte(i) + } + atomic.AddInt64(r.n, int64(len(p))) + return len(p), err +} + +func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) } +func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) } + +func testTransportReqBodyAfterResponse(t *testing.T, status int) { + const bodySize = 10 << 20 + clientDone := make(chan struct{}) + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + + var n int64 // atomic + req, err := http.NewRequest("PUT", "https://dummy.tld/", io.LimitReader(countingReader{&n}, bodySize)) + if err != nil { + return err + } + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != status { + return fmt.Errorf("status code = %v; want %v", res.StatusCode, status) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("Slurp: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("unexpected body: %q", slurp) + } + if status == 200 { + if got := atomic.LoadInt64(&n); got != bodySize { + return fmt.Errorf("For 200 response, Transport wrote %d bytes; want %d", got, bodySize) + } + } else { + if got := atomic.LoadInt64(&n); got == 0 || got >= bodySize { + return fmt.Errorf("For %d response, Transport wrote %d bytes; want (0,%d) exclusive", status, got, bodySize) + } + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + var dataRecv int64 + var closed bool + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. 
+ return nil + default: + return err + } + } + //println(fmt.Sprintf("server got frame: %v", f)) + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + if f.StreamEnded() { + return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f) + } + case *DataFrame: + dataLen := len(f.Data()) + if dataLen > 0 { + if dataRecv == 0 { + enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + } + if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { + return err + } + if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { + return err + } + } + dataRecv += int64(dataLen) + + if !closed && ((status != 200 && dataRecv > 0) || + (status == 200 && dataRecv == bodySize)) { + closed = true + if err := ct.fr.WriteData(f.StreamID, true, nil); err != nil { + return err + } + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +// See golang.org/issue/13444 +func TestTransportFullDuplex(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) // redundant but for clarity + w.(http.Flusher).Flush() + io.Copy(flushWriter{w}, capitalizeReader{r.Body}) + fmt.Fprintf(w, "bye.\n") + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + pr, pw := io.Pipe() + req, err := http.NewRequest("PUT", st.ts.URL, ioutil.NopCloser(pr)) + if err != nil { + t.Fatal(err) + } + req.ContentLength = -1 + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + t.Fatalf("StatusCode = %v; want %v", res.StatusCode, 200) + } + bs := bufio.NewScanner(res.Body) + want := func(v string) { + if !bs.Scan() { + t.Fatalf("wanted to read %q but Scan() = false, err = %v", v, bs.Err()) + } + } + write := func(v string) { + _, err := io.WriteString(pw, v) + if err != nil { + t.Fatalf("pipe write: %v", err) + } + } + write("foo\n") + want("FOO") + write("bar\n") + want("BAR") + pw.Close() + want("bye.") + if err := bs.Err(); err != nil { + t.Fatal(err) + } +} + +func TestTransportConnectRequest(t *testing.T) { + gotc := make(chan *http.Request, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + gotc <- r + }, optOnlyServer) + defer st.Close() + + u, err := url.Parse(st.ts.URL) + if err != nil { + t.Fatal(err) + } + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + tests := []struct { + req *http.Request + want string + }{ + { + req: &http.Request{ + Method: "CONNECT", + Header: http.Header{}, + URL: u, + }, + want: u.Host, + }, + { + req: &http.Request{ + Method: "CONNECT", + Header: http.Header{}, + URL: u, + Host: "example.com:123", + }, + want: "example.com:123", + }, + } + + for i, tt := range tests { + res, err := c.Do(tt.req) + if err != nil { + t.Errorf("%d. 
RoundTrip = %v", i, err)
+ continue
+ }
+ res.Body.Close()
+ req := <-gotc
+ if req.Method != "CONNECT" {
+ t.Errorf("method = %q; want CONNECT", req.Method)
+ }
+ if req.Host != tt.want {
+ t.Errorf("Host = %q; want %q", req.Host, tt.want)
+ }
+ if req.URL.Host != tt.want {
+ t.Errorf("URL.Host = %q; want %q", req.URL.Host, tt.want)
+ }
+ }
+}
+
+type headerType int
+
+const (
+ noHeader headerType = iota // omitted
+ oneHeader
+ splitHeader // broken into continuation on purpose
+)
+
+const (
+ f0 = noHeader
+ f1 = oneHeader
+ f2 = splitHeader
+ d0 = false
+ d1 = true
+)
+
+// Test all 36 combinations of response frame orders:
+// (3 ways of 100-continue) * (2 ways of headers) * (2 ways of data) * (3 ways of trailers)
+// Generated by http://play.golang.org/p/SScqYKJYXd
+func TestTransportResPattern_c0h1d0t0(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f0) }
+func TestTransportResPattern_c0h1d0t1(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f1) }
+func TestTransportResPattern_c0h1d0t2(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f2) }
+func TestTransportResPattern_c0h1d1t0(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f0) }
+func TestTransportResPattern_c0h1d1t1(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f1) }
+func TestTransportResPattern_c0h1d1t2(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f2) }
+func TestTransportResPattern_c0h2d0t0(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f0) }
+func TestTransportResPattern_c0h2d0t1(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f1) }
+func TestTransportResPattern_c0h2d0t2(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f2) }
+func TestTransportResPattern_c0h2d1t0(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f0) }
+func TestTransportResPattern_c0h2d1t1(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f1) }
+func TestTransportResPattern_c0h2d1t2(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f2) }
+func TestTransportResPattern_c1h1d0t0(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f0) }
+func TestTransportResPattern_c1h1d0t1(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f1) }
+func TestTransportResPattern_c1h1d0t2(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f2) }
+func TestTransportResPattern_c1h1d1t0(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f0) }
+func TestTransportResPattern_c1h1d1t1(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f1) }
+func TestTransportResPattern_c1h1d1t2(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f2) }
+func TestTransportResPattern_c1h2d0t0(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f0) }
+func TestTransportResPattern_c1h2d0t1(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f1) }
+func TestTransportResPattern_c1h2d0t2(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f2) }
+func TestTransportResPattern_c1h2d1t0(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f0) }
+func TestTransportResPattern_c1h2d1t1(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f1) }
+func TestTransportResPattern_c1h2d1t2(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f2) }
+func TestTransportResPattern_c2h1d0t0(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f0) }
+func TestTransportResPattern_c2h1d0t1(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f1) }
+func TestTransportResPattern_c2h1d0t2(t *testing.T) { 
testTransportResPattern(t, f2, f1, d0, f2) } +func TestTransportResPattern_c2h1d1t0(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f0) } +func TestTransportResPattern_c2h1d1t1(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f1) } +func TestTransportResPattern_c2h1d1t2(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f2) } +func TestTransportResPattern_c2h2d0t0(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f0) } +func TestTransportResPattern_c2h2d0t1(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f1) } +func TestTransportResPattern_c2h2d0t2(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f2) } +func TestTransportResPattern_c2h2d1t0(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f0) } +func TestTransportResPattern_c2h2d1t1(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f1) } +func TestTransportResPattern_c2h2d1t2(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f2) } + +func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerType, withData bool, trailers headerType) { + const reqBody = "some request body" + const resBody = "some response body" + + if resHeader == noHeader { + // TODO: test 100-continue followed by immediate + // server stream reset, without headers in the middle? + panic("invalid combination") + } + + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody)) + if expect100Continue != noHeader { + req.Header.Set("Expect", "100-continue") + } + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("Slurp: %v", err) + } + wantBody := resBody + if !withData { + wantBody = "" + } + if string(slurp) != wantBody { + return fmt.Errorf("body = %q; want %q", slurp, wantBody) + } + if trailers == noHeader { + if len(res.Trailer) > 0 { + t.Errorf("Trailer = %v; want none", res.Trailer) + } + } else { + want := http.Header{"Some-Trailer": {"some-value"}} + if !reflect.DeepEqual(res.Trailer, want) { + t.Errorf("Trailer = %v; want %v", res.Trailer, want) + } + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + endStream := false + send := func(mode headerType) { + hbf := buf.Bytes() + switch mode { + case oneHeader: + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.Header().StreamID, + EndHeaders: true, + EndStream: endStream, + BlockFragment: hbf, + }) + case splitHeader: + if len(hbf) < 2 { + panic("too small") + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.Header().StreamID, + EndHeaders: false, + EndStream: endStream, + BlockFragment: hbf[:1], + }) + ct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:]) + default: + panic("bogus mode") + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *DataFrame: + if !f.StreamEnded() { + // No need to send flow control tokens. The test request body is tiny. 
+ continue + } + // Response headers (1+ frames; 1 or 2 in this test, but never 0) + { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "x-foo", Value: "blah"}) + enc.WriteField(hpack.HeaderField{Name: "x-bar", Value: "more"}) + if trailers != noHeader { + enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "some-trailer"}) + } + endStream = withData == false && trailers == noHeader + send(resHeader) + } + if withData { + endStream = trailers == noHeader + ct.fr.WriteData(f.StreamID, endStream, []byte(resBody)) + } + if trailers != noHeader { + endStream = true + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"}) + send(trailers) + } + if endStream { + return nil + } + case *HeadersFrame: + if expect100Continue != noHeader { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) + send(expect100Continue) + } + } + } + } + ct.run() +} + +func TestTransportReceiveUndeclaredTrailer(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, nil) + } + if len(slurp) > 0 { + return fmt.Errorf("body = %q; want nothing", slurp) + } + if _, ok := res.Trailer["Some-Trailer"]; !ok { + return fmt.Errorf("expected Some-Trailer") + } + return nil + } + ct.server = func() error { + ct.greet() + + var n int + var hf *HeadersFrame + for hf == nil && n < 10 { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + hf, _ = f.(*HeadersFrame) + n++ + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + // send headers without Trailer header + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + // send trailers + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "I'm an undeclared Trailer!"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + return nil + } + ct.run() +} + +func TestTransportInvalidTrailer_Pseudo1(t *testing.T) { + testTransportInvalidTrailer_Pseudo(t, oneHeader) +} +func TestTransportInvalidTrailer_Pseudo2(t *testing.T) { + testTransportInvalidTrailer_Pseudo(t, splitHeader) +} +func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) { + testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"}) + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + }) +} + +func TestTransportInvalidTrailer_Capital1(t *testing.T) { + testTransportInvalidTrailer_Capital(t, oneHeader) +} +func TestTransportInvalidTrailer_Capital2(t *testing.T) { + testTransportInvalidTrailer_Capital(t, splitHeader) +} +func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) { + testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + 
enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"}) + }) +} +func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) { + testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "", Value: "bad"}) + }) +} +func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) { + testInvalidTrailer(t, oneHeader, headerFieldValueError("has\nnewline"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"}) + }) +} + +func testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeTrailer func(*hpack.Encoder)) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + se, ok := err.(StreamError) + if !ok || se.Cause != wantErr { + return fmt.Errorf("res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v", slurp, err, wantErr, wantErr) + } + if len(slurp) > 0 { + return fmt.Errorf("body = %q; want nothing", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + var endStream bool + send := func(mode headerType) { + hbf := buf.Bytes() + switch mode { + case oneHeader: + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: endStream, + BlockFragment: hbf, + }) + case splitHeader: + if len(hbf) < 2 { + panic("too small") + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: false, + EndStream: endStream, + BlockFragment: hbf[:1], + }) + ct.fr.WriteContinuation(f.StreamID, true, hbf[1:]) + default: + panic("bogus mode") + } + } + // Response headers (1+ frames; 1 or 2 in this test, but never 0) + { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "declared"}) + endStream = false + send(oneHeader) + } + // Trailers: + { + endStream = true + buf.Reset() + writeTrailer(enc) + send(trailers) + } + return nil + } + } + } + ct.run() +} + +// headerListSize returns the HTTP2 header list size of h. +// http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE +// http://httpwg.org/specs/rfc7540.html#MaxHeaderBlock +func headerListSize(h http.Header) (size uint32) { + for k, vv := range h { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + size += hf.Size() + } + } + return size +} + +// padHeaders adds data to an http.Header until headerListSize(h) == +// limit. Due to the way header list sizes are calculated, padHeaders +// cannot add fewer than len("Pad-Headers") + 32 bytes to h, and will +// call t.Fatal if asked to do so. PadHeaders first reserves enough +// space for an empty "Pad-Headers" key, then adds as many copies of +// filler as possible. Any remaining bytes necessary to push the +// header list size up to limit are added to h["Pad-Headers"]. +func padHeaders(t *testing.T, h http.Header, limit uint64, filler string) { + if limit > 0xffffffff { + t.Fatalf("padHeaders: refusing to pad to more than 2^32-1 bytes. 
limit = %v", limit) + } + hf := hpack.HeaderField{Name: "Pad-Headers", Value: ""} + minPadding := uint64(hf.Size()) + size := uint64(headerListSize(h)) + + minlimit := size + minPadding + if limit < minlimit { + t.Fatalf("padHeaders: limit %v < %v", limit, minlimit) + } + + // Use a fixed-width format for name so that fieldSize + // remains constant. + nameFmt := "Pad-Headers-%06d" + hf = hpack.HeaderField{Name: fmt.Sprintf(nameFmt, 1), Value: filler} + fieldSize := uint64(hf.Size()) + + // Add as many complete filler values as possible, leaving + // room for at least one empty "Pad-Headers" key. + limit = limit - minPadding + for i := 0; size+fieldSize < limit; i++ { + name := fmt.Sprintf(nameFmt, i) + h.Add(name, filler) + size += fieldSize + } + + // Add enough bytes to reach limit. + remain := limit - size + lastValue := strings.Repeat("*", int(remain)) + h.Add("Pad-Headers", lastValue) +} + +func TestPadHeaders(t *testing.T) { + check := func(h http.Header, limit uint32, fillerLen int) { + if h == nil { + h = make(http.Header) + } + filler := strings.Repeat("f", fillerLen) + padHeaders(t, h, uint64(limit), filler) + gotSize := headerListSize(h) + if gotSize != limit { + t.Errorf("Got size = %v; want %v", gotSize, limit) + } + } + // Try all possible combinations for small fillerLen and limit. + hf := hpack.HeaderField{Name: "Pad-Headers", Value: ""} + minLimit := hf.Size() + for limit := minLimit; limit <= 128; limit++ { + for fillerLen := 0; uint32(fillerLen) <= limit; fillerLen++ { + check(nil, limit, fillerLen) + } + } + + // Try a few tests with larger limits, plus cumulative + // tests. Since these tests are cumulative, tests[i+1].limit + // must be >= tests[i].limit + minLimit. See the comment on + // padHeaders for more info on why the limit arg has this + // restriction. + tests := []struct { + fillerLen int + limit uint32 + }{ + { + fillerLen: 64, + limit: 1024, + }, + { + fillerLen: 1024, + limit: 1286, + }, + { + fillerLen: 256, + limit: 2048, + }, + { + fillerLen: 1024, + limit: 10 * 1024, + }, + { + fillerLen: 1023, + limit: 11 * 1024, + }, + } + h := make(http.Header) + for _, tc := range tests { + check(nil, tc.limit, tc.fillerLen) + check(h, tc.limit, tc.fillerLen) + } +} + +func TestTransportChecksRequestHeaderListSize(t *testing.T) { + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + // Consume body & force client to send + // trailers before writing response. + // ioutil.ReadAll returns non-nil err for + // requests that attempt to send greater than + // maxHeaderListSize bytes of trailers, since + // those requests generate a stream reset. 
+ ioutil.ReadAll(r.Body) + r.Body.Close() + }, + func(ts *httptest.Server) { + ts.Config.MaxHeaderBytes = 16 << 10 + }, + optOnlyServer, + optQuiet, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + checkRoundTrip := func(req *http.Request, wantErr error, desc string) { + res, err := tr.RoundTrip(req) + if err != wantErr { + if res != nil { + res.Body.Close() + } + t.Errorf("%v: RoundTrip err = %v; want %v", desc, err, wantErr) + return + } + if err == nil { + if res == nil { + t.Errorf("%v: response nil; want non-nil.", desc) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + t.Errorf("%v: response status = %v; want %v", desc, res.StatusCode, http.StatusOK) + } + return + } + if res != nil { + t.Errorf("%v: RoundTrip err = %v but response non-nil", desc, err) + } + } + headerListSizeForRequest := func(req *http.Request) (size uint64) { + contentLen := actualContentLength(req) + trailers, err := commaSeparatedTrailers(req) + if err != nil { + t.Fatalf("headerListSizeForRequest: %v", err) + } + cc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff} + cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.mu.Lock() + hdrs, err := cc.encodeHeaders(req, true, trailers, contentLen) + cc.mu.Unlock() + if err != nil { + t.Fatalf("headerListSizeForRequest: %v", err) + } + hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(hf hpack.HeaderField) { + size += uint64(hf.Size()) + }) + if len(hdrs) > 0 { + if _, err := hpackDec.Write(hdrs); err != nil { + t.Fatalf("headerListSizeForRequest: %v", err) + } + } + return size + } + // Create a new Request for each test, rather than reusing the + // same Request, to avoid a race when modifying req.Headers. + // See https://github.com/golang/go/issues/21316 + newRequest := func() *http.Request { + // Body must be non-nil to enable writing trailers. + body := strings.NewReader("hello") + req, err := http.NewRequest("POST", st.ts.URL, body) + if err != nil { + t.Fatalf("newRequest: NewRequest: %v", err) + } + return req + } + + // Make an arbitrary request to ensure we get the server's + // settings frame and initialize peerMaxHeaderListSize. + req := newRequest() + checkRoundTrip(req, nil, "Initial request") + + // Get the ClientConn associated with the request and validate + // peerMaxHeaderListSize. + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + cc, err := tr.connPool().GetClientConn(req, addr) + if err != nil { + t.Fatalf("GetClientConn: %v", err) + } + cc.mu.Lock() + peerSize := cc.peerMaxHeaderListSize + cc.mu.Unlock() + st.scMu.Lock() + wantSize := uint64(st.sc.maxHeaderListSize()) + st.scMu.Unlock() + if peerSize != wantSize { + t.Errorf("peerMaxHeaderListSize = %v; want %v", peerSize, wantSize) + } + + // Sanity check peerSize. (*serverConn) maxHeaderListSize adds + // 320 bytes of padding. + wantHeaderBytes := uint64(st.ts.Config.MaxHeaderBytes) + 320 + if peerSize != wantHeaderBytes { + t.Errorf("peerMaxHeaderListSize = %v; want %v.", peerSize, wantHeaderBytes) + } + + // Pad headers & trailers, but stay under peerSize. + req = newRequest() + req.Header = make(http.Header) + req.Trailer = make(http.Header) + filler := strings.Repeat("*", 1024) + padHeaders(t, req.Trailer, peerSize, filler) + // cc.encodeHeaders adds some default headers to the request, + // so we need to leave room for those. 
+ defaultBytes := headerListSizeForRequest(req) + padHeaders(t, req.Header, peerSize-defaultBytes, filler) + checkRoundTrip(req, nil, "Headers & Trailers under limit") + + // Add enough header bytes to push us over peerSize. + req = newRequest() + req.Header = make(http.Header) + padHeaders(t, req.Header, peerSize, filler) + checkRoundTrip(req, errRequestHeaderListSize, "Headers over limit") + + // Push trailers over the limit. + req = newRequest() + req.Trailer = make(http.Header) + padHeaders(t, req.Trailer, peerSize+1, filler) + checkRoundTrip(req, errRequestHeaderListSize, "Trailers over limit") + + // Send headers with a single large value. + req = newRequest() + filler = strings.Repeat("*", int(peerSize)) + req.Header = make(http.Header) + req.Header.Set("Big", filler) + checkRoundTrip(req, errRequestHeaderListSize, "Single large header") + + // Send trailers with a single large value. + req = newRequest() + req.Trailer = make(http.Header) + req.Trailer.Set("Big", filler) + checkRoundTrip(req, errRequestHeaderListSize, "Single large trailer") +} + +func TestTransportChecksResponseHeaderListSize(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != errResponseHeaderListSize { + if res != nil { + res.Body.Close() + } + size := int64(0) + for k, vv := range res.Header { + for _, v := range vv { + size += int64(len(k)) + int64(len(v)) + 32 + } + } + return fmt.Errorf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + large := strings.Repeat("a", 1<<10) + for i := 0; i < 5042; i++ { + enc.WriteField(hpack.HeaderField{Name: large, Value: large}) + } + if size, want := buf.Len(), 6329; size != want { + // Note: this number might change if + // our hpack implementation + // changes. That's fine. This is + // just a sanity check that our + // response can fit in a single + // header block fragment frame. + return fmt.Errorf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want) + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + return nil + } + } + } + ct.run() +} + +// Test that the Transport returns a typed error from Response.Body.Read calls +// when the server sends an error. 
(here we use a panic, since that should generate +// a stream error, but others like cancel should be similar) +func TestTransportBodyReadErrorType(t *testing.T) { + doPanic := make(chan bool, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() // force headers out + <-doPanic + panic("boom") + }, + optOnlyServer, + optQuiet, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + doPanic <- true + buf := make([]byte, 100) + n, err := res.Body.Read(buf) + want := StreamError{StreamID: 0x1, Code: 0x2} + if !reflect.DeepEqual(want, err) { + t.Errorf("Read = %v, %#v; want error %#v", n, err, want) + } +} + +// golang.org/issue/13924 +// This used to fail after many iterations, especially with -race: +// go test -v -run=TestTransportDoubleCloseOnWriteError -count=500 -race +func TestTransportDoubleCloseOnWriteError(t *testing.T) { + var ( + mu sync.Mutex + conn net.Conn // to close if set + ) + + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + defer mu.Unlock() + if conn != nil { + conn.Close() + } + }, + optOnlyServer, + ) + defer st.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tc, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + mu.Lock() + defer mu.Unlock() + conn = tc + return tc, nil + }, + } + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + c.Get(st.ts.URL) +} + +// Test that the http1 Transport.DisableKeepAlives option is respected +// and connections are closed as soon as idle. +// See golang.org/issue/14008 +func TestTransportDisableKeepAlives(t *testing.T) { + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "hi") + }, + optOnlyServer, + ) + defer st.Close() + + connClosed := make(chan struct{}) // closed on tls.Conn.Close + tr := &Transport{ + t1: &http.Transport{ + DisableKeepAlives: true, + }, + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tc, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + return ¬eCloseConn{Conn: tc, closefn: func() { close(connClosed) }}, nil + }, + } + c := &http.Client{Transport: tr} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + if _, err := ioutil.ReadAll(res.Body); err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + select { + case <-connClosed: + case <-time.After(1 * time.Second): + t.Errorf("timeout") + } + +} + +// Test concurrent requests with Transport.DisableKeepAlives. We can share connections, +// but when things are totally idle, it still needs to close. 
+func TestTransportDisableKeepAlives_Concurrency(t *testing.T) { + const D = 25 * time.Millisecond + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + time.Sleep(D) + io.WriteString(w, "hi") + }, + optOnlyServer, + ) + defer st.Close() + + var dials int32 + var conns sync.WaitGroup + tr := &Transport{ + t1: &http.Transport{ + DisableKeepAlives: true, + }, + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + tc, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + atomic.AddInt32(&dials, 1) + conns.Add(1) + return ¬eCloseConn{Conn: tc, closefn: func() { conns.Done() }}, nil + }, + } + c := &http.Client{Transport: tr} + var reqs sync.WaitGroup + const N = 20 + for i := 0; i < N; i++ { + reqs.Add(1) + if i == N-1 { + // For the final request, try to make all the + // others close. This isn't verified in the + // count, other than the Log statement, since + // it's so timing dependent. This test is + // really to make sure we don't interrupt a + // valid request. + time.Sleep(D * 2) + } + go func() { + defer reqs.Done() + res, err := c.Get(st.ts.URL) + if err != nil { + t.Error(err) + return + } + if _, err := ioutil.ReadAll(res.Body); err != nil { + t.Error(err) + return + } + res.Body.Close() + }() + } + reqs.Wait() + conns.Wait() + t.Logf("did %d dials, %d requests", atomic.LoadInt32(&dials), N) +} + +type noteCloseConn struct { + net.Conn + onceClose sync.Once + closefn func() +} + +func (c *noteCloseConn) Close() error { + c.onceClose.Do(c.closefn) + return c.Conn.Close() +} + +func isTimeout(err error) bool { + switch err := err.(type) { + case nil: + return false + case *url.Error: + return isTimeout(err.Err) + case net.Error: + return err.Timeout() + } + return false +} + +// Test that the http1 Transport.ResponseHeaderTimeout option and cancel is sent. 
+func TestTransportResponseHeaderTimeout_NoBody(t *testing.T) { + testTransportResponseHeaderTimeout(t, false) +} +func TestTransportResponseHeaderTimeout_Body(t *testing.T) { + testTransportResponseHeaderTimeout(t, true) +} + +func testTransportResponseHeaderTimeout(t *testing.T, body bool) { + ct := newClientTester(t) + ct.tr.t1 = &http.Transport{ + ResponseHeaderTimeout: 5 * time.Millisecond, + } + ct.client = func() error { + c := &http.Client{Transport: ct.tr} + var err error + var n int64 + const bodySize = 4 << 20 + if body { + _, err = c.Post("https://dummy.tld/", "text/foo", io.LimitReader(countingReader{&n}, bodySize)) + } else { + _, err = c.Get("https://dummy.tld/") + } + if !isTimeout(err) { + t.Errorf("client expected timeout error; got %#v", err) + } + if body && n != bodySize { + t.Errorf("only read %d bytes of body; want %d", n, bodySize) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + switch f := f.(type) { + case *DataFrame: + dataLen := len(f.Data()) + if dataLen > 0 { + if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { + return err + } + if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { + return err + } + } + case *RSTStreamFrame: + if f.StreamID == 1 && f.ErrCode == ErrCodeCancel { + return nil + } + } + } + } + ct.run() +} + +func TestTransportDisableCompression(t *testing.T) { + const body = "sup" + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + want := http.Header{ + "User-Agent": []string{"Go-http-client/2.0"}, + } + if !reflect.DeepEqual(r.Header, want) { + t.Errorf("request headers = %v; want %v", r.Header, want) + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + t1: &http.Transport{ + DisableCompression: true, + }, + } + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() +} + +// RFC 7540 section 8.1.2.2 +func TestTransportRejectsConnHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + var got []string + for k := range r.Header { + got = append(got, k) + } + sort.Strings(got) + w.Header().Set("Got-Header", strings.Join(got, ",")) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + tests := []struct { + key string + value []string + want string + }{ + { + key: "Upgrade", + value: []string{"anything"}, + want: "ERROR: http2: invalid Upgrade request header: [\"anything\"]", + }, + { + key: "Connection", + value: []string{"foo"}, + want: "ERROR: http2: invalid Connection request header: [\"foo\"]", + }, + { + key: "Connection", + value: []string{"close"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Connection", + value: []string{"close", "something-else"}, + want: "ERROR: http2: invalid Connection request header: [\"close\" \"something-else\"]", + }, + { + key: "Connection", + value: []string{"keep-alive"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Proxy-Connection", // just deleted and ignored + value: []string{"keep-alive"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{""}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: 
[]string{"foo"}, + want: "ERROR: http2: invalid Transfer-Encoding request header: [\"foo\"]", + }, + { + key: "Transfer-Encoding", + value: []string{"chunked"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{"chunked", "other"}, + want: "ERROR: http2: invalid Transfer-Encoding request header: [\"chunked\" \"other\"]", + }, + { + key: "Content-Length", + value: []string{"123"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Keep-Alive", + value: []string{"doop"}, + want: "Accept-Encoding,User-Agent", + }, + } + + for _, tt := range tests { + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Header[tt.key] = tt.value + res, err := tr.RoundTrip(req) + var got string + if err != nil { + got = fmt.Sprintf("ERROR: %v", err) + } else { + got = res.Header.Get("Got-Header") + res.Body.Close() + } + if got != tt.want { + t.Errorf("For key %q, value %q, got = %q; want %q", tt.key, tt.value, got, tt.want) + } + } +} + +// golang.org/issue/14048 +func TestTransportFailsOnInvalidHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + var got []string + for k := range r.Header { + got = append(got, k) + } + sort.Strings(got) + w.Header().Set("Got-Header", strings.Join(got, ",")) + }, optOnlyServer) + defer st.Close() + + tests := [...]struct { + h http.Header + wantErr string + }{ + 0: { + h: http.Header{"with space": {"foo"}}, + wantErr: `invalid HTTP header name "with space"`, + }, + 1: { + h: http.Header{"name": {"Брэд"}}, + wantErr: "", // okay + }, + 2: { + h: http.Header{"имя": {"Brad"}}, + wantErr: `invalid HTTP header name "имя"`, + }, + 3: { + h: http.Header{"foo": {"foo\x01bar"}}, + wantErr: `invalid HTTP header value "foo\x01bar" for header "foo"`, + }, + } + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + for i, tt := range tests { + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Header = tt.h + res, err := tr.RoundTrip(req) + var bad bool + if tt.wantErr == "" { + if err != nil { + bad = true + t.Errorf("case %d: error = %v; want no error", i, err) + } + } else { + if !strings.Contains(fmt.Sprint(err), tt.wantErr) { + bad = true + t.Errorf("case %d: error = %v; want error %q", i, err, tt.wantErr) + } + } + if err == nil { + if bad { + t.Logf("case %d: server got headers %q", i, res.Header.Get("Got-Header")) + } + res.Body.Close() + } + } +} + +// Tests that gzipReader doesn't crash on a second Read call following +// the first Read call's gzip.NewReader returning an error. +func TestGzipReader_DoubleReadCrash(t *testing.T) { + gz := &gzipReader{ + body: ioutil.NopCloser(strings.NewReader("0123456789")), + } + var buf [1]byte + n, err1 := gz.Read(buf[:]) + if n != 0 || !strings.Contains(fmt.Sprint(err1), "invalid header") { + t.Fatalf("Read = %v, %v; want 0, invalid header", n, err1) + } + n, err2 := gz.Read(buf[:]) + if n != 0 || err2 != err1 { + t.Fatalf("second Read = %v, %v; want 0, %v", n, err2, err1) + } +} + +func TestTransportNewTLSConfig(t *testing.T) { + tests := [...]struct { + conf *tls.Config + host string + want *tls.Config + }{ + // Normal case. 
+ 0: {
+ conf: nil,
+ host: "foo.com",
+ want: &tls.Config{
+ ServerName: "foo.com",
+ NextProtos: []string{NextProtoTLS},
+ },
+ },
+
+ // User-provided name (bar.com) takes precedence:
+ 1: {
+ conf: &tls.Config{
+ ServerName: "bar.com",
+ },
+ host: "foo.com",
+ want: &tls.Config{
+ ServerName: "bar.com",
+ NextProtos: []string{NextProtoTLS},
+ },
+ },
+
+ // NextProto is prepended:
+ 2: {
+ conf: &tls.Config{
+ NextProtos: []string{"foo", "bar"},
+ },
+ host: "example.com",
+ want: &tls.Config{
+ ServerName: "example.com",
+ NextProtos: []string{NextProtoTLS, "foo", "bar"},
+ },
+ },
+
+ // NextProto is not duplicated:
+ 3: {
+ conf: &tls.Config{
+ NextProtos: []string{"foo", "bar", NextProtoTLS},
+ },
+ host: "example.com",
+ want: &tls.Config{
+ ServerName: "example.com",
+ NextProtos: []string{"foo", "bar", NextProtoTLS},
+ },
+ },
+ }
+ for i, tt := range tests {
+ // Ignore the session ticket keys part, which ends up populating
+ // unexported fields in the Config:
+ if tt.conf != nil {
+ tt.conf.SessionTicketsDisabled = true
+ }
+
+ tr := &Transport{TLSClientConfig: tt.conf}
+ got := tr.newTLSConfig(tt.host)
+
+ got.SessionTicketsDisabled = false
+
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("%d. got %#v; want %#v", i, got, tt.want)
+ }
+ }
+}
+
+// The Google GFE responds to HEAD requests with a HEADERS frame
+// without END_STREAM, followed by a 0-length DATA frame with
+// END_STREAM. Make sure we don't get confused by that. (We did.)
+func TestTransportReadHeadResponse(t *testing.T) {
+ ct := newClientTester(t)
+ clientDone := make(chan struct{})
+ ct.client = func() error {
+ defer close(clientDone)
+ req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil)
+ res, err := ct.tr.RoundTrip(req)
+ if err != nil {
+ return err
+ }
+ if res.ContentLength != 123 {
+ return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength)
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return fmt.Errorf("ReadAll: %v", err)
+ }
+ if len(slurp) > 0 {
+ return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp)
+ }
+ return nil
+ }
+ ct.server = func() error {
+ ct.greet()
+ for {
+ f, err := ct.fr.ReadFrame()
+ if err != nil {
+ t.Logf("ReadFrame: %v", err)
+ return nil
+ }
+ hf, ok := f.(*HeadersFrame)
+ if !ok {
+ continue
+ }
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"})
+ ct.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: false, // as the GFE does
+ BlockFragment: buf.Bytes(),
+ })
+ ct.fr.WriteData(hf.StreamID, true, nil)
+
+ <-clientDone
+ return nil
+ }
+ }
+ ct.run()
+}
+
+func TestTransportReadHeadResponseWithBody(t *testing.T) {
+ // This test uses an invalid response format.
+ // Discard logger output to avoid spamming the test output.
+ log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stderr) + + response := "redirecting to /elsewhere" + ct := newClientTester(t) + clientDone := make(chan struct{}) + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + if res.ContentLength != int64(len(response)) { + return fmt.Errorf("Content-Length = %d; want %d", res.ContentLength, len(response)) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("ReadAll: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: strconv.Itoa(len(response))}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(hf.StreamID, true, []byte(response)) + + <-clientDone + return nil + } + } + ct.run() +} + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (int, error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +// golang.org/issue/15425: test that a handler closing the request +// body doesn't terminate the stream to the peer. (It just stops +// readability from the handler's side, and eventually the client +// runs out of flow control tokens) +func TestTransportHandlerBodyClose(t *testing.T) { + const bodySize = 10 << 20 + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + r.Body.Close() + io.Copy(w, io.LimitReader(neverEnding('A'), bodySize)) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + g0 := runtime.NumGoroutine() + + const numReq = 10 + for i := 0; i < numReq; i++ { + req, err := http.NewRequest("POST", st.ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + if n != bodySize || err != nil { + t.Fatalf("req#%d: Copy = %d, %v; want %d, nil", i, n, err, bodySize) + } + } + tr.CloseIdleConnections() + + gd := runtime.NumGoroutine() - g0 + if gd > numReq/2 { + t.Errorf("appeared to leak goroutines") + } + +} + +// https://golang.org/issue/15930 +func TestTransportFlowControl(t *testing.T) { + const bufLen = 64 << 10 + var total int64 = 100 << 20 // 100MB + if testing.Short() { + total = 10 << 20 + } + + var wrote int64 // updated atomically + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + b := make([]byte, bufLen) + for wrote < total { + n, err := w.Write(b) + atomic.AddInt64(&wrote, int64(n)) + if err != nil { + t.Errorf("ResponseWriter.Write error: %v", err) + break + } + w.(http.Flusher).Flush() + } + }, optOnlyServer) + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal("NewRequest error:", err) + } + resp, err := tr.RoundTrip(req) + if err != nil { + 
t.Fatal("RoundTrip error:", err) + } + defer resp.Body.Close() + + var read int64 + b := make([]byte, bufLen) + for { + n, err := resp.Body.Read(b) + if err == io.EOF { + break + } + if err != nil { + t.Fatal("Read error:", err) + } + read += int64(n) + + const max = transportDefaultStreamFlow + if w := atomic.LoadInt64(&wrote); -max > read-w || read-w > max { + t.Fatalf("Too much data inflight: server wrote %v bytes but client only received %v", w, read) + } + + // Let the server get ahead of the client. + time.Sleep(1 * time.Millisecond) + } +} + +// golang.org/issue/14627 -- if the server sends a GOAWAY frame, make +// the Transport remember it and return it back to users (via +// RoundTrip or request body reads) if needed (e.g. if the server +// proceeds to close the TCP connection before the client gets its +// response) +func TestTransportUsesGoAwayDebugError_RoundTrip(t *testing.T) { + testTransportUsesGoAwayDebugError(t, false) +} + +func TestTransportUsesGoAwayDebugError_Body(t *testing.T) { + testTransportUsesGoAwayDebugError(t, true) +} + +func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + + const goAwayErrCode = ErrCodeHTTP11Required // arbitrary + const goAwayDebugData = "some debug data" + + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if failMidBody { + if err != nil { + return fmt.Errorf("unexpected client RoundTrip error: %v", err) + } + _, err = io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + } + want := GoAwayError{ + LastStreamID: 5, + ErrCode: goAwayErrCode, + DebugData: goAwayDebugData, + } + if !reflect.DeepEqual(err, want) { + t.Errorf("RoundTrip error = %T: %#v, want %T (%#v)", err, err, want, want) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + if failMidBody { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + } + // Write two GOAWAY frames, to test that the Transport takes + // the interesting parts of both. 
+ ct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData)) + ct.fr.WriteGoAway(5, goAwayErrCode, nil) + ct.sc.(*net.TCPConn).CloseWrite() + <-clientDone + return nil + } + } + ct.run() +} + +func testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) { + ct := newClientTester(t) + + clientClosed := make(chan struct{}) + serverWroteFirstByte := make(chan struct{}) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + <-serverWroteFirstByte + + if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 { + return fmt.Errorf("body read = %v, %v; want 1, nil", n, err) + } + res.Body.Close() // leaving 4999 bytes unread + close(clientClosed) + + return nil + } + ct.server = func() error { + ct.greet() + + var hf *HeadersFrame + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + var ok bool + hf, ok = f.(*HeadersFrame) + if !ok { + return fmt.Errorf("Got %T; want HeadersFrame", f) + } + break + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + // Two cases: + // - Send one DATA frame with 5000 bytes. + // - Send two DATA frames with 1 and 4999 bytes each. + // + // In both cases, the client should consume one byte of data, + // refund that byte, then refund the following 4999 bytes. + // + // In the second case, the server waits for the client connection to + // close before seconding the second DATA frame. This tests the case + // where the client receives a DATA frame after it has reset the stream. + if oneDataFrame { + ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 5000)) + close(serverWroteFirstByte) + <-clientClosed + } else { + ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 1)) + close(serverWroteFirstByte) + <-clientClosed + ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 4999)) + } + + waitingFor := "RSTStreamFrame" + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for %s: %v", waitingFor, err) + } + if _, ok := f.(*SettingsFrame); ok { + continue + } + switch waitingFor { + case "RSTStreamFrame": + if rf, ok := f.(*RSTStreamFrame); !ok || rf.ErrCode != ErrCodeCancel { + return fmt.Errorf("Expected a RSTStreamFrame with code cancel; got %v", summarizeFrame(f)) + } + waitingFor = "WindowUpdateFrame" + case "WindowUpdateFrame": + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != 4999 { + return fmt.Errorf("Expected WindowUpdateFrame for 4999 bytes; got %v", summarizeFrame(f)) + } + return nil + } + } + } + ct.run() +} + +// See golang.org/issue/16481 +func TestTransportReturnsUnusedFlowControlSingleWrite(t *testing.T) { + testTransportReturnsUnusedFlowControl(t, true) +} + +// See golang.org/issue/20469 +func TestTransportReturnsUnusedFlowControlMultipleWrites(t *testing.T) { + testTransportReturnsUnusedFlowControl(t, false) +} + +// Issue 16612: adjust flow control on open streams when transport +// receives SETTINGS with INITIAL_WINDOW_SIZE from server. 
+func TestTransportAdjustsFlowControl(t *testing.T) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + + const bodySize = 1 << 20 + + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + + req, _ := http.NewRequest("POST", "https://dummy.tld/", struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + res.Body.Close() + return nil + } + ct.server = func() error { + _, err := io.ReadFull(ct.sc, make([]byte, len(ClientPreface))) + if err != nil { + return fmt.Errorf("reading client preface: %v", err) + } + + var gotBytes int64 + var sentSettings bool + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + return nil + default: + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + } + switch f := f.(type) { + case *DataFrame: + gotBytes += int64(len(f.Data())) + // After we've got half the client's + // initial flow control window's worth + // of request body data, give it just + // enough flow control to finish. + if gotBytes >= initialWindowSize/2 && !sentSettings { + sentSettings = true + + ct.fr.WriteSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize}) + ct.fr.WriteWindowUpdate(0, bodySize) + ct.fr.WriteSettingsAck() + } + + if f.StreamEnded() { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + } + } + } + ct.run() +} + +// See golang.org/issue/16556 +func TestTransportReturnsDataPaddingFlowControl(t *testing.T) { + ct := newClientTester(t) + + unblockClient := make(chan bool, 1) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + defer res.Body.Close() + <-unblockClient + return nil + } + ct.server = func() error { + ct.greet() + + var hf *HeadersFrame + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + var ok bool + hf, ok = f.(*HeadersFrame) + if !ok { + return fmt.Errorf("Got %T; want HeadersFrame", f) + } + break + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + pad := make([]byte, 5) + ct.fr.WriteDataPadded(hf.StreamID, false, make([]byte, 5000), pad) // without ending stream + + f, err := ct.readNonSettingsFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for first WindowUpdateFrame: %v", err) + } + wantBack := uint32(len(pad)) + 1 // one byte for the length of the padding + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID != 0 { + return fmt.Errorf("Expected conn WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f)) + } + + f, err = ct.readNonSettingsFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for second WindowUpdateFrame: %v", err) + } + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || 
wuf.StreamID == 0 { + return fmt.Errorf("Expected stream WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f)) + } + unblockClient <- true + return nil + } + ct.run() +} + +// golang.org/issue/16572 -- RoundTrip shouldn't hang when it gets a +// StreamError as a result of the response HEADERS +func TestTransportReturnsErrorOnBadResponseHeaders(t *testing.T) { + ct := newClientTester(t) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err == nil { + res.Body.Close() + return errors.New("unexpected successful GET") + } + want := StreamError{1, ErrCodeProtocol, headerFieldNameError(" content-type")} + if !reflect.DeepEqual(want, err) { + t.Errorf("RoundTrip error = %#v; want %#v", err, want) + } + return nil + } + ct.server = func() error { + ct.greet() + + hf, err := ct.firstHeaders() + if err != nil { + return err + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: " content-type", Value: "bogus"}) // bogus spaces + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + for { + fr, err := ct.readFrame() + if err != nil { + return fmt.Errorf("error waiting for RST_STREAM from client: %v", err) + } + if _, ok := fr.(*SettingsFrame); ok { + continue + } + if rst, ok := fr.(*RSTStreamFrame); !ok || rst.StreamID != 1 || rst.ErrCode != ErrCodeProtocol { + t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr)) + } + break + } + + return nil + } + ct.run() +} + +// byteAndEOFReader returns is in an io.Reader which reads one byte +// (the underlying byte) and io.EOF at once in its Read call. +type byteAndEOFReader byte + +func (b byteAndEOFReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + panic("unexpected useless call") + } + p[0] = byte(b) + return 1, io.EOF +} + +// Issue 16788: the Transport had a regression where it started +// sending a spurious DATA frame with a duplicate END_STREAM bit after +// the request body writer goroutine had already read an EOF from the +// Request.Body and included the END_STREAM on a data-carrying DATA +// frame. +// +// Notably, to trigger this, the requests need to use a Request.Body +// which returns (non-0, io.EOF) and also needs to set the ContentLength +// explicitly. +func TestTransportBodyDoubleEndStream(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // Nothing. + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + for i := 0; i < 2; i++ { + req, _ := http.NewRequest("POST", st.ts.URL, byteAndEOFReader('a')) + req.ContentLength = 1 + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatalf("failure on req %d: %v", i+1, err) + } + defer res.Body.Close() + } +} + +// golang.org/issue/16847, golang.org/issue/19103 +func TestTransportRequestPathPseudo(t *testing.T) { + type result struct { + path string + err string + } + tests := []struct { + req *http.Request + want result + }{ + 0: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Host: "foo.com", + Path: "/foo", + }, + }, + want: result{path: "/foo"}, + }, + // In Go 1.7, we accepted paths of "//foo". + // In Go 1.8, we rejected it (issue 16847). + // In Go 1.9, we accepted it again (issue 19103). 
+ 1: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Host: "foo.com", + Path: "//foo", + }, + }, + want: result{path: "//foo"}, + }, + + // Opaque with //$Matching_Hostname/path + 2: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "https", + Opaque: "//foo.com/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque with some other Request.Host instead: + 3: { + req: &http.Request{ + Method: "GET", + Host: "bar.com", + URL: &url.URL{ + Scheme: "https", + Opaque: "//bar.com/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque without the leading "//": + 4: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Opaque: "/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque we can't handle: + 5: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "https", + Opaque: "//unknown_host/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{err: `invalid request :path "https://unknown_host/path" from URL.Opaque = "//unknown_host/path"`}, + }, + + // A CONNECT request: + 6: { + req: &http.Request{ + Method: "CONNECT", + URL: &url.URL{ + Host: "foo.com", + }, + }, + want: result{}, + }, + } + for i, tt := range tests { + cc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff} + cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.mu.Lock() + hdrs, err := cc.encodeHeaders(tt.req, false, "", -1) + cc.mu.Unlock() + var got result + hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) { + if f.Name == ":path" { + got.path = f.Value + } + }) + if err != nil { + got.err = err.Error() + } else if len(hdrs) > 0 { + if _, err := hpackDec.Write(hdrs); err != nil { + t.Errorf("%d. bogus hpack: %v", i, err) + continue + } + } + if got != tt.want { + t.Errorf("%d. got %+v; want %+v", i, got, tt.want) + } + + } + +} + +// golang.org/issue/17071 -- don't sniff the first byte of the request body +// before we've determined that the ClientConn is usable. +func TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) { + const body = "foo" + req, _ := http.NewRequest("POST", "http://foo.com/", ioutil.NopCloser(strings.NewReader(body))) + cc := &ClientConn{ + closed: true, + } + _, err := cc.RoundTrip(req) + if err != errClientConnUnusable { + t.Fatalf("RoundTrip = %v; want errClientConnUnusable", err) + } + slurp, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("ReadAll = %v", err) + } + if string(slurp) != body { + t.Errorf("Body = %q; want %q", slurp, body) + } +} + +func TestClientConnPing(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optOnlyServer) + defer st.Close() + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + cc, err := tr.dialClientConn(st.ts.Listener.Addr().String(), false) + if err != nil { + t.Fatal(err) + } + if err = cc.Ping(testContext{}); err != nil { + t.Fatal(err) + } +} + +// Issue 16974: if the server sent a DATA frame after the user +// canceled the Transport's Request, the Transport previously wrote to a +// closed pipe, got an error, and ended up closing the whole TCP +// connection. +func TestTransportCancelDataResponseRace(t *testing.T) { + cancel := make(chan struct{}) + clientGotError := make(chan bool, 1) + + const msg = "Hello." 
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if strings.Contains(r.URL.Path, "/hello") { + time.Sleep(50 * time.Millisecond) + io.WriteString(w, msg) + return + } + for i := 0; i < 50; i++ { + io.WriteString(w, "Some data.") + w.(http.Flusher).Flush() + if i == 2 { + close(cancel) + <-clientGotError + } + time.Sleep(10 * time.Millisecond) + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Cancel = cancel + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + if _, err = io.Copy(ioutil.Discard, res.Body); err == nil { + t.Fatal("unexpected success") + } + clientGotError <- true + + res, err = c.Get(st.ts.URL + "/hello") + if err != nil { + t.Fatal(err) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(slurp) != msg { + t.Errorf("Got = %q; want %q", slurp, msg) + } +} + +// Issue 21316: It should be safe to reuse an http.Request after the +// request has completed. +func TestTransportNoRaceOnRequestObjectAfterRequestComplete(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + io.WriteString(w, "body") + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, _ := http.NewRequest("GET", st.ts.URL, nil) + resp, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil { + t.Fatalf("error reading response body: %v", err) + } + if err := resp.Body.Close(); err != nil { + t.Fatalf("error closing response body: %v", err) + } + + // This access of req.Header should not race with code in the transport. + req.Header = http.Header{} +} + +func TestTransportRetryAfterGOAWAY(t *testing.T) { + var dialer struct { + sync.Mutex + count int + } + ct1 := make(chan *clientTester) + ct2 := make(chan *clientTester) + + ln := newLocalListener(t) + defer ln.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + } + tr.DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialer.Lock() + defer dialer.Unlock() + dialer.count++ + if dialer.count == 3 { + return nil, errors.New("unexpected number of dials") + } + cc, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + return nil, fmt.Errorf("dial error: %v", err) + } + sc, err := ln.Accept() + if err != nil { + return nil, fmt.Errorf("accept error: %v", err) + } + ct := &clientTester{ + t: t, + tr: tr, + cc: cc, + sc: sc, + fr: NewFramer(sc, sc), + } + switch dialer.count { + case 1: + ct1 <- ct + case 2: + ct2 <- ct + } + return cc, nil + } + + errs := make(chan error, 3) + done := make(chan struct{}) + defer close(done) + + // Client. + go func() { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := tr.RoundTrip(req) + if res != nil { + res.Body.Close() + if got := res.Header.Get("Foo"); got != "bar" { + err = fmt.Errorf("foo header = %q; want bar", got) + } + } + if err != nil { + err = fmt.Errorf("RoundTrip: %v", err) + } + errs <- err + }() + + connToClose := make(chan io.Closer, 2) + + // Server for the first request. 
+ go func() { + var ct *clientTester + select { + case ct = <-ct1: + case <-done: + return + } + + connToClose <- ct.cc + ct.greet() + hf, err := ct.firstHeaders() + if err != nil { + errs <- fmt.Errorf("server1 failed reading HEADERS: %v", err) + return + } + t.Logf("server1 got %v", hf) + if err := ct.fr.WriteGoAway(0 /*max id*/, ErrCodeNo, nil); err != nil { + errs <- fmt.Errorf("server1 failed writing GOAWAY: %v", err) + return + } + errs <- nil + }() + + // Server for the second request. + go func() { + var ct *clientTester + select { + case ct = <-ct2: + case <-done: + return + } + + connToClose <- ct.cc + ct.greet() + hf, err := ct.firstHeaders() + if err != nil { + errs <- fmt.Errorf("server2 failed reading HEADERS: %v", err) + return + } + t.Logf("server2 got %v", hf) + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + err = ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + if err != nil { + errs <- fmt.Errorf("server2 failed writing response HEADERS: %v", err) + } else { + errs <- nil + } + }() + + for k := 0; k < 3; k++ { + select { + case err := <-errs: + if err != nil { + t.Error(err) + } + case <-time.After(1 * time.Second): + t.Errorf("timed out") + } + } + + for { + select { + case c := <-connToClose: + c.Close() + default: + return + } + } +} + +func TestTransportRetryAfterRefusedStream(t *testing.T) { + clientDone := make(chan struct{}) + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + resp, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + resp.Body.Close() + if resp.StatusCode != 204 { + return fmt.Errorf("Status = %v; want 204", resp.StatusCode) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + nreq := 0 + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. + return nil + default: + return err + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + nreq++ + if nreq == 1 { + ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) + } else { + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +func TestTransportRetryHasLimit(t *testing.T) { + // Skip in short mode because the total expected delay is 1s+2s+4s+8s+16s=29s. 
+ if testing.Short() { + t.Skip("skipping long test in short mode") + } + clientDone := make(chan struct{}) + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + resp, err := ct.tr.RoundTrip(req) + if err == nil { + return fmt.Errorf("RoundTrip expected error, got response: %+v", resp) + } + t.Logf("expected error, got: %v", err) + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. + return nil + default: + return err + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +func TestTransportResponseDataBeforeHeaders(t *testing.T) { + // This test use not valid response format. + // Discarding logger output to not spam tests output. + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stderr) + + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + req := httptest.NewRequest("GET", "https://dummy.tld/", nil) + // First request is normal to ensure the check is per stream and not per connection. + _, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip expected no error, got: %v", err) + } + // Second request returns a DATA frame with no HEADERS. + resp, err := ct.tr.RoundTrip(req) + if err == nil { + return fmt.Errorf("RoundTrip expected error, got response: %+v", resp) + } + if err, ok := err.(StreamError); !ok || err.Code != ErrCodeProtocol { + return fmt.Errorf("expected stream PROTOCOL_ERROR, got: %v", err) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + switch f.StreamID { + case 1: + // Send a valid response to first request. + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + case 3: + ct.fr.WriteData(f.StreamID, true, []byte("payload")) + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} +func TestTransportRequestsStallAtServerLimit(t *testing.T) { + const maxConcurrent = 2 + + greet := make(chan struct{}) // server sends initial SETTINGS frame + gotRequest := make(chan struct{}) // server received a request + clientDone := make(chan struct{}) + + // Collect errors from goroutines. + var wg sync.WaitGroup + errs := make(chan error, 100) + defer func() { + wg.Wait() + close(errs) + for err := range errs { + t.Error(err) + } + }() + + // We will send maxConcurrent+2 requests. This checker goroutine waits for the + // following stages: + // 1. The first maxConcurrent requests are received by the server. + // 2. The client will cancel the next request + // 3. The server is unblocked so it can service the first maxConcurrent requests + // 4. 
The client will send the final request + wg.Add(1) + unblockClient := make(chan struct{}) + clientRequestCancelled := make(chan struct{}) + unblockServer := make(chan struct{}) + go func() { + defer wg.Done() + // Stage 1. + for k := 0; k < maxConcurrent; k++ { + <-gotRequest + } + // Stage 2. + close(unblockClient) + <-clientRequestCancelled + // Stage 3: give some time for the final RoundTrip call to be scheduled and + // verify that the final request is not sent. + time.Sleep(50 * time.Millisecond) + select { + case <-gotRequest: + errs <- errors.New("last request did not stall") + close(unblockServer) + return + default: + } + close(unblockServer) + // Stage 4. + <-gotRequest + }() + + ct := newClientTester(t) + ct.client = func() error { + var wg sync.WaitGroup + defer func() { + wg.Wait() + close(clientDone) + ct.cc.(*net.TCPConn).CloseWrite() + }() + for k := 0; k < maxConcurrent+2; k++ { + wg.Add(1) + go func(k int) { + defer wg.Done() + // Don't send the second request until after receiving SETTINGS from the server + // to avoid a race where we use the default SettingMaxConcurrentStreams, which + // is much larger than maxConcurrent. We have to send the first request before + // waiting because the first request triggers the dial and greet. + if k > 0 { + <-greet + } + // Block until maxConcurrent requests are sent before sending any more. + if k >= maxConcurrent { + <-unblockClient + } + req, _ := http.NewRequest("GET", fmt.Sprintf("https://dummy.tld/%d", k), nil) + if k == maxConcurrent { + // This request will be canceled. + cancel := make(chan struct{}) + req.Cancel = cancel + close(cancel) + _, err := ct.tr.RoundTrip(req) + close(clientRequestCancelled) + if err == nil { + errs <- fmt.Errorf("RoundTrip(%d) should have failed due to cancel", k) + return + } + } else { + resp, err := ct.tr.RoundTrip(req) + if err != nil { + errs <- fmt.Errorf("RoundTrip(%d): %v", k, err) + return + } + ioutil.ReadAll(resp.Body) + resp.Body.Close() + if resp.StatusCode != 204 { + errs <- fmt.Errorf("Status = %v; want 204", resp.StatusCode) + return + } + } + }(k) + } + return nil + } + + ct.server = func() error { + var wg sync.WaitGroup + defer wg.Wait() + + ct.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent}) + + // Server write loop. + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + writeResp := make(chan uint32, maxConcurrent+1) + + wg.Add(1) + go func() { + defer wg.Done() + <-unblockServer + for id := range writeResp { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: id, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + }() + + // Server read loop. + var nreq int + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it will have reported any errors on its side. + return nil + default: + return err + } + } + switch f := f.(type) { + case *WindowUpdateFrame: + case *SettingsFrame: + // Wait for the client SETTINGS ack until ending the greet. 
+ close(greet) + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + gotRequest <- struct{}{} + nreq++ + writeResp <- f.StreamID + if nreq == maxConcurrent+1 { + close(writeResp) + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + + ct.run() +} + +func TestAuthorityAddr(t *testing.T) { + tests := []struct { + scheme, authority string + want string + }{ + {"http", "foo.com", "foo.com:80"}, + {"https", "foo.com", "foo.com:443"}, + {"https", "foo.com:1234", "foo.com:1234"}, + {"https", "1.2.3.4:1234", "1.2.3.4:1234"}, + {"https", "1.2.3.4", "1.2.3.4:443"}, + {"https", "[::1]:1234", "[::1]:1234"}, + {"https", "[::1]", "[::1]:443"}, + } + for _, tt := range tests { + got := authorityAddr(tt.scheme, tt.authority) + if got != tt.want { + t.Errorf("authorityAddr(%q, %q) = %q; want %q", tt.scheme, tt.authority, got, tt.want) + } + } +} + +// Issue 20448: stop allocating for DATA frames' payload after +// Response.Body.Close is called. +func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) { + megabyteZero := make([]byte, 1<<20) + + writeErr := make(chan error, 1) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() + var sum int64 + for i := 0; i < 100; i++ { + n, err := w.Write(megabyteZero) + sum += int64(n) + if err != nil { + writeErr <- err + return + } + } + t.Logf("wrote all %d bytes", sum) + writeErr <- nil + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + var buf [1]byte + if _, err := res.Body.Read(buf[:]); err != nil { + t.Error(err) + } + if err := res.Body.Close(); err != nil { + t.Error(err) + } + + trb, ok := res.Body.(transportResponseBody) + if !ok { + t.Fatalf("res.Body = %T; want transportResponseBody", res.Body) + } + if trb.cs.bufPipe.b != nil { + t.Errorf("response body pipe is still open") + } + + gotErr := <-writeErr + if gotErr == nil { + t.Errorf("Handler unexpectedly managed to write its entire response without getting an error") + } else if gotErr != errStreamClosed { + t.Errorf("Handler Write err = %v; want errStreamClosed", gotErr) + } +} + +// Issue 18891: make sure Request.Body == NoBody means no DATA frame +// is ever sent, even if empty. 
+func TestTransportNoBodyMeansNoDATA(t *testing.T) { + ct := newClientTester(t) + + unblockClient := make(chan bool) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", go18httpNoBody()) + ct.tr.RoundTrip(req) + <-unblockClient + return nil + } + ct.server = func() error { + defer close(unblockClient) + defer ct.cc.(*net.TCPConn).Close() + ct.greet() + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f := f.(type) { + default: + return fmt.Errorf("Got %T; want HeadersFrame", f) + case *WindowUpdateFrame, *SettingsFrame: + continue + case *HeadersFrame: + if !f.StreamEnded() { + return fmt.Errorf("got headers frame without END_STREAM") + } + return nil + } + } + } + ct.run() +} + +func benchSimpleRoundTrip(b *testing.B, nHeaders int) { + defer disableGoroutineTracking()() + b.ReportAllocs() + st := newServerTester(b, + func(w http.ResponseWriter, r *http.Request) { + }, + optOnlyServer, + optQuiet, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + b.Fatal(err) + } + + for i := 0; i < nHeaders; i++ { + name := fmt.Sprint("A-", i) + req.Header.Set(name, "*") + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + res, err := tr.RoundTrip(req) + if err != nil { + if res != nil { + res.Body.Close() + } + b.Fatalf("RoundTrip err = %v; want nil", err) + } + res.Body.Close() + if res.StatusCode != http.StatusOK { + b.Fatalf("Response code = %v; want %v", res.StatusCode, http.StatusOK) + } + } +} + +type infiniteReader struct{} + +func (r infiniteReader) Read(b []byte) (int, error) { + return len(b), nil +} + +// Issue 20521: it is not an error to receive a response and end stream +// from the server without the body being consumed. +func TestTransportResponseAndResetWithoutConsumingBodyRace(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + // The request body needs to be big enough to trigger flow control. + req, _ := http.NewRequest("PUT", st.ts.URL, infiniteReader{}) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != http.StatusOK { + t.Fatalf("Response code = %v; want %v", res.StatusCode, http.StatusOK) + } +} + +// Verify transport doesn't crash when receiving bogus response lacking a :status header. +// Issue 22880. 
+func TestTransportHandlesInvalidStatuslessResponse(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + _, err := ct.tr.RoundTrip(req) + const substr = "malformed response from server: missing status pseudo header" + if !strings.Contains(fmt.Sprint(err), substr) { + return fmt.Errorf("RoundTrip error = %v; want substring %q", err, substr) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/html"}) // no :status header + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: false, // we'll send some DATA to try to crash the transport + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(f.StreamID, true, []byte("payload")) + return nil + } + } + } + ct.run() +} + +func BenchmarkClientRequestHeaders(b *testing.B) { + b.Run(" 0 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 0) }) + b.Run(" 10 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 10) }) + b.Run(" 100 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 100) }) + b.Run("1000 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 1000) }) +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 000000000..54ab4a88e --- /dev/null +++ b/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,365 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool +} + +// writeContext is the interface needed by the various frame writer +// types below. All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. 
Keep this + // here to catch future refactoring breaking it. + panic("writeEndsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + +type writeSettings []Setting + +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) +} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + ctx.Flush() // ignore error: we're hanging up on them anyway + return err +} + +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. 
+ const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { + return err + } + first = false + } + return nil +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers or trailers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int // 0 means no ":status" line + h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. + endStream bool + + date string + contentType string + contentLength string +} + +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + +func (w *writeResHeaders) staysWithinBuffer(max int) bool { + // TODO: this is a common one. It'd be nice to return true + // here and get into the fast path if we could be clever and + // calculate the size fast enough, or at least a conservative + // uppper bound that usually fires. (Maybe if w.h and + // w.trailers are nil, so we don't need to enumerate it.) + // Otherwise I'm afraid that just calculating the length to + // answer this question would be slower than the ~2µs benefit. + return false +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) + } + + encodeHeaders(enc, w.h, w.trailers) + + if w.contentType != "" { + encKV(enc, "content-type", w.contentType) + } + if w.contentLength != "" { + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 && w.trailers == nil { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. +type writePushPromise struct { + streamID uint32 // pusher stream + method string // for :method + url *url.URL // for :scheme, :authority, :path + h http.Header + + // Creates an ID for a pushed stream. This runs on serveG just before + // the frame is written. The returned ID is copied to promisedID. 
+ allocatePromisedID func() (uint32, error) + promisedID uint32 +} + +func (w *writePushPromise) staysWithinBuffer(max int) bool { + // TODO: see writeResHeaders.staysWithinBuffer + return false +} + +func (w *writePushPromise) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + encKV(enc, ":method", w.method) + encKV(enc, ":scheme", w.url.Scheme) + encKV(enc, ":authority", w.url.Host) + encKV(enc, ":path", w.url.RequestURI()) + encodeHeaders(enc, w.h, nil) + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WritePushPromise(PushPromiseParam{ + StreamID: w.streamID, + PromiseID: w.promisedID, + BlockFragment: frag, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +type write100ContinueHeadersFrame struct { + streamID uint32 +} + +func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + encKV(enc, ":status", "100") + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: buf.Bytes(), + EndStream: false, + EndHeaders: true, + }) +} + +func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { + // Sloppy but conservative: + return 9+2*(len(":status")+len("100")) <= max +} + +type writeWindowUpdate struct { + streamID uint32 // or 0 for conn-level + n uint32 +} + +func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) +} + +// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) +// is encoded only only if k is in keys. +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 000000000..4fe307307 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -0,0 +1,242 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "fmt" + +// WriteScheduler is the interface implemented by HTTP/2 write schedulers. +// Methods are never called concurrently. +type WriteScheduler interface { + // OpenStream opens a new stream in the write scheduler. 
+ // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { + // write is the interface value that does the writing, once the + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. + write writeFramer + + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. + stream *stream + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. + done chan error +} + +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. So special case this type of write + // message. + return se.StreamID + } + return 0 + } + return wr.stream.id +} + +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) + } + return 0 +} + +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. The consumed bytes are deducted from the +// underlying stream's flow control budget. 
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { + var empty FrameWriteRequest + + // Non-DATA frames are always consumed whole. + wd, ok := wr.write.(*writeData) + if !ok || len(wd.p) == 0 { + return wr, empty, 1 + } + + // Might need to split after applying limits. + allowed := wr.stream.flow.available() + if n < allowed { + allowed = n + } + if wr.stream.sc.maxFrameSize < allowed { + allowed = wr.stream.sc.maxFrameSize + } + if allowed <= 0 { + return empty, empty, 0 + } + if len(wd.p) > int(allowed) { + wr.stream.flow.take(allowed) + consumed := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[:allowed], + // Even if the original had endStream set, there + // are bytes remaining because len(wd.p) > allowed, + // so we know endStream is false. + endStream: false, + }, + // Our caller is blocking on the final DATA frame, not + // this intermediate frame, so no need to wait. + done: nil, + } + rest := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[allowed:], + endStream: wd.endStream, + }, + done: wr.done, + } + return consumed, rest, 2 + } + + // The frame is consumed whole. + // NB: This cast cannot overflow because allowed is <= math.MaxInt32. + wr.stream.flow.take(int32(len(wd.p))) + return wr, empty, 1 +} + +// String is for debugging only. +func (wr FrameWriteRequest) String() string { + var des string + if s, ok := wr.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wr.write) + } + return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) +} + +// replyToWriter sends err to wr.done and panics if the send must block +// This does nothing if wr.done is nil. +func (wr *FrameWriteRequest) replyToWriter(err error) { + if wr.done == nil { + return + } + select { + case wr.done <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) + } + wr.write = nil // prevent use (assume it's tainted after wr.done send) +} + +// writeQueue is used by implementations of WriteScheduler. +type writeQueue struct { + s []FrameWriteRequest +} + +func (q *writeQueue) empty() bool { return len(q.s) == 0 } + +func (q *writeQueue) push(wr FrameWriteRequest) { + q.s = append(q.s, wr) +} + +func (q *writeQueue) shift() FrameWriteRequest { + if len(q.s) == 0 { + panic("invalid use of queue") + } + wr := q.s[0] + // TODO: less copy-happy queue. + copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = FrameWriteRequest{} + q.s = q.s[:len(q.s)-1] + return wr +} + +// consume consumes up to n bytes from q.s[0]. If the frame is +// entirely consumed, it is removed from the queue. If the frame +// is partially consumed, the frame is kept with the consumed +// bytes removed. Returns true iff any bytes were consumed. +func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { + if len(q.s) == 0 { + return FrameWriteRequest{}, false + } + consumed, rest, numresult := q.s[0].Consume(n) + switch numresult { + case 0: + return FrameWriteRequest{}, false + case 1: + q.shift() + case 2: + q.s[0] = rest + } + return consumed, true +} + +type writeQueuePool []*writeQueue + +// put inserts an unused writeQueue into the pool. +func (p *writeQueuePool) put(q *writeQueue) { + for i := range q.s { + q.s[i] = FrameWriteRequest{} + } + q.s = q.s[:0] + *p = append(*p, q) +} + +// get returns an empty writeQueue. 
+func (p *writeQueuePool) get() *writeQueue { + ln := len(*p) + if ln == 0 { + return new(writeQueue) + } + x := ln - 1 + q := (*p)[x] + (*p)[x] = nil + *p = (*p)[:x] + return q +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 000000000..848fed6ec --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -0,0 +1,452 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeight = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. 
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { + if cfg == nil { + // For justification of these defaults, see: + // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY + cfg = &PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 10, + MaxIdleNodesInTree: 10, + ThrottleOutOfOrderWrites: false, + } + } + + ws := &priorityWriteScheduler{ + nodes: make(map[uint32]*priorityNode), + maxClosedNodesInTree: cfg.MaxClosedNodesInTree, + maxIdleNodesInTree: cfg.MaxIdleNodesInTree, + enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, + } + ws.nodes[0] = &ws.root + if cfg.ThrottleOutOfOrderWrites { + ws.writeThrottleLimit = 1024 + } else { + ws.writeThrottleLimit = math.MaxInt32 + } + return ws +} + +type priorityNodeState int + +const ( + priorityNodeOpen priorityNodeState = iota + priorityNodeClosed + priorityNodeIdle +) + +// priorityNode is a node in an HTTP/2 priority tree. +// Each node is associated with a single stream ID. +// See RFC 7540, Section 5.3. +type priorityNode struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeState // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree + + // These links form the priority tree. + parent *priorityNode + kids *priorityNode // start of the kids list + prev, next *priorityNode // doubly-linked list of siblings +} + +func (n *priorityNode) setParent(parent *priorityNode) { + if n == parent { + panic("setParent to self") + } + if n.parent == parent { + return + } + // Unlink from current parent. + if parent := n.parent; parent != nil { + if n.prev == nil { + parent.kids = n.next + } else { + n.prev.next = n.next + } + if n.next != nil { + n.next.prev = n.prev + } + } + // Link to new parent. + // If parent=nil, remove n from the tree. + // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). + n.parent = parent + if parent == nil { + n.next = nil + n.prev = nil + } else { + n.next = parent.kids + n.prev = nil + if n.next != nil { + n.next.prev = n + } + parent.kids = n + } +} + +func (n *priorityNode) addBytes(b int64) { + n.bytes += b + for ; n != nil; n = n.parent { + n.subtreeBytes += b + } +} + +// walkReadyInOrder iterates over the tree in priority order, calling f for each node +// with a non-empty write queue. When f returns true, this funcion returns true and the +// walk halts. tmp is used as scratch space for sorting. +// +// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true +// if any ancestor p of n is still open (ignoring the root node). +func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { + if !n.q.empty() && f(n, openParent) { + return true + } + if n.kids == nil { + return false + } + + // Don't consider the root "open" when updating openParent since + // we can't send data frames on the root stream (only control frames). + if n.id != 0 { + openParent = openParent || (n.state == priorityNodeOpen) + } + + // Common case: only one kid or all kids have the same weight. + // Some clients don't use weights; other clients (like web browsers) + // use mostly-linear priority trees. 
+ w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." 
+ parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. 
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority_test.go b/vendor/golang.org/x/net/http2/writesched_priority_test.go new file mode 100644 index 000000000..f2b535a2c --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority_test.go @@ -0,0 +1,541 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http2 + +import ( + "bytes" + "fmt" + "sort" + "testing" +) + +func defaultPriorityWriteScheduler() *priorityWriteScheduler { + return NewPriorityWriteScheduler(nil).(*priorityWriteScheduler) +} + +func checkPriorityWellFormed(ws *priorityWriteScheduler) error { + for id, n := range ws.nodes { + if id != n.id { + return fmt.Errorf("bad ws.nodes: ws.nodes[%d] = %d", id, n.id) + } + if n.parent == nil { + if n.next != nil || n.prev != nil { + return fmt.Errorf("bad node %d: nil parent but prev/next not nil", id) + } + continue + } + found := false + for k := n.parent.kids; k != nil; k = k.next { + if k.id == id { + found = true + break + } + } + if !found { + return fmt.Errorf("bad node %d: not found in parent %d kids list", id, n.parent.id) + } + } + return nil +} + +func fmtTree(ws *priorityWriteScheduler, fmtNode func(*priorityNode) string) string { + var ids []int + for _, n := range ws.nodes { + ids = append(ids, int(n.id)) + } + sort.Ints(ids) + + var buf bytes.Buffer + for _, id := range ids { + if buf.Len() != 0 { + buf.WriteString(" ") + } + if id == 0 { + buf.WriteString(fmtNode(&ws.root)) + } else { + buf.WriteString(fmtNode(ws.nodes[uint32(id)])) + } + } + return buf.String() +} + +func fmtNodeParentSkipRoot(n *priorityNode) string { + switch { + case n.id == 0: + return "" + case n.parent == nil: + return fmt.Sprintf("%d{parent:nil}", n.id) + default: + return fmt.Sprintf("%d{parent:%d}", n.id, n.parent.id) + } +} + +func fmtNodeWeightParentSkipRoot(n *priorityNode) string { + switch { + case n.id == 0: + return "" + case n.parent == nil: + return fmt.Sprintf("%d{weight:%d,parent:nil}", n.id, n.weight) + default: + return fmt.Sprintf("%d{weight:%d,parent:%d}", n.id, n.weight, n.parent.id) + } +} + +func TestPriorityTwoStreams(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + + want := "1{weight:15,parent:0} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + + // Move 1's parent to 2. + ws.AdjustStream(1, PriorityParam{ + StreamDep: 2, + Weight: 32, + Exclusive: false, + }) + want = "1{weight:32,parent:2} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityAdjustExclusiveZero(t *testing.T) { + // 1, 2, and 3 are all children of the 0 stream. + // Exclusive reprioritization to any of the streams should bring + // the rest of the streams under the reprioritized stream. 
+ ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + ws.OpenStream(3, OpenStreamOptions{}) + + want := "1{weight:15,parent:0} 2{weight:15,parent:0} 3{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + + ws.AdjustStream(2, PriorityParam{ + StreamDep: 0, + Weight: 20, + Exclusive: true, + }) + want = "1{weight:15,parent:2} 2{weight:20,parent:0} 3{weight:15,parent:2}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityAdjustOwnParent(t *testing.T) { + // Assigning a node as its own parent should have no effect. + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + ws.AdjustStream(2, PriorityParam{ + StreamDep: 2, + Weight: 20, + Exclusive: true, + }) + want := "1{weight:15,parent:0} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityClosedStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxClosedNodesInTree: 2}).(*priorityWriteScheduler) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + + // Close the first three streams. We lose 1, but keep 2 and 3. + ws.CloseStream(1) + ws.CloseStream(2) + ws.CloseStream(3) + + want := "2{weight:15,parent:0} 3{weight:15,parent:2} 4{weight:15,parent:3}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After close\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } + + // Adding a stream as an exclusive child of 1 gives it default + // priorities, since 1 is gone. + ws.OpenStream(5, OpenStreamOptions{}) + ws.AdjustStream(5, PriorityParam{StreamDep: 1, Weight: 15, Exclusive: true}) + + // Adding a stream as an exclusive child of 2 should work, since 2 is not gone. + ws.OpenStream(6, OpenStreamOptions{}) + ws.AdjustStream(6, PriorityParam{StreamDep: 2, Weight: 15, Exclusive: true}) + + want = "2{weight:15,parent:0} 3{weight:15,parent:6} 4{weight:15,parent:3} 5{weight:15,parent:0} 6{weight:15,parent:2}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After add streams\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityClosedStreamsDisabled(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + + // Close the first two streams. We keep only 3. 
+ ws.CloseStream(1) + ws.CloseStream(2) + + want := "3{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After close\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityIdleStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxIdleNodesInTree: 2}).(*priorityWriteScheduler) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle + ws.OpenStream(4, OpenStreamOptions{}) + ws.OpenStream(5, OpenStreamOptions{}) + ws.OpenStream(6, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{StreamDep: 1, Weight: 15}) + ws.AdjustStream(5, PriorityParam{StreamDep: 2, Weight: 15}) + ws.AdjustStream(6, PriorityParam{StreamDep: 3, Weight: 15}) + + want := "2{weight:15,parent:0} 3{weight:20,parent:2} 4{weight:15,parent:0} 5{weight:15,parent:2} 6{weight:15,parent:3}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityIdleStreamsDisabled(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle + ws.OpenStream(4, OpenStreamOptions{}) + + want := "4{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection531NonExclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.1. + // A,B,C,D = 1,2,3,4 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{ + StreamDep: 1, + Weight: 15, + Exclusive: false, + }) + want := "1{parent:0} 2{parent:1} 3{parent:1} 4{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection531Exclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.1. + // A,B,C,D = 1,2,3,4 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{ + StreamDep: 1, + Weight: 15, + Exclusive: true, + }) + want := "1{parent:0} 2{parent:4} 3{parent:4} 4{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func makeSection533Tree() *priorityWriteScheduler { + // Initial tree from RFC 7540 Section 5.3.3. 
+ // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + return ws +} + +func TestPrioritySection533NonExclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.3. + // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + ws.AdjustStream(1, PriorityParam{ + StreamDep: 4, + Weight: 15, + Exclusive: false, + }) + want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:4}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection533Exclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.3. + // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + ws.AdjustStream(1, PriorityParam{ + StreamDep: 4, + Weight: 15, + Exclusive: true, + }) + want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func checkPopAll(ws WriteScheduler, order []uint32) error { + for k, id := range order { + wr, ok := ws.Pop() + if !ok { + return fmt.Errorf("Pop[%d]: got ok=false, want %d (order=%v)", k, id, order) + } + if got := wr.StreamID(); got != id { + return fmt.Errorf("Pop[%d]: got %v, want %d (order=%v)", k, got, id, order) + } + } + wr, ok := ws.Pop() + if ok { + return fmt.Errorf("Pop[%d]: got %v, want ok=false (order=%v)", len(order), wr.StreamID(), order) + } + return nil +} + +func TestPriorityPopFrom533Tree(t *testing.T) { + ws := makeSection533Tree() + + ws.Push(makeWriteHeadersRequest(3 /*C*/)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteHeadersRequest(5 /*E*/)) + ws.Push(makeWriteHeadersRequest(1 /*A*/)) + t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot)) + + if err := checkPopAll(ws, []uint32{0 /*NonStream*/, 1, 3, 5}); err != nil { + t.Error(err) + } +} + +func TestPriorityPopFromLinearTree(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + + ws.Push(makeWriteHeadersRequest(3)) + ws.Push(makeWriteHeadersRequest(4)) + ws.Push(makeWriteHeadersRequest(1)) + ws.Push(makeWriteHeadersRequest(2)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteNonStreamRequest()) + t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot)) + + if err := checkPopAll(ws, 
[]uint32{0, 0 /*NonStreams*/, 1, 2, 3, 4}); err != nil { + t.Error(err) + } +} + +func TestPriorityFlowControl(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: false}) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + + sc := &serverConn{maxFrameSize: 16} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil}) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 16), false}, st2, nil}) + ws.AdjustStream(2, PriorityParam{StreamDep: 1}) + + // No flow-control bytes available. + if wr, ok := ws.Pop(); ok { + t.Fatalf("Pop(limited by flow control)=%v,true, want false", wr) + } + + // Add enough flow-control bytes to write st2 in two Pop calls. + // Should write data from st2 even though it's lower priority than st1. + for i := 1; i <= 2; i++ { + st2.flow.add(8) + wr, ok := ws.Pop() + if !ok { + t.Fatalf("Pop(%d)=false, want true", i) + } + if got, want := wr.DataSize(), 8; got != want { + t.Fatalf("Pop(%d)=%d bytes, want %d bytes", i, got, want) + } + } +} + +func TestPriorityThrottleOutOfOrderWrites(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: true}) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + + sc := &serverConn{maxFrameSize: 4096} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + st1.flow.add(4096) + st2.flow.add(4096) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 4096), false}, st2, nil}) + ws.AdjustStream(2, PriorityParam{StreamDep: 1}) + + // We have enough flow-control bytes to write st2 in a single Pop call. + // However, due to out-of-order write throttling, the first call should + // only write 1KB. + wr, ok := ws.Pop() + if !ok { + t.Fatalf("Pop(st2.first)=false, want true") + } + if got, want := wr.StreamID(), uint32(2); got != want { + t.Fatalf("Pop(st2.first)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 1024; got != want { + t.Fatalf("Pop(st2.first)=%d bytes, want %d bytes", got, want) + } + + // Now add data on st1. This should take precedence. + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 4096), false}, st1, nil}) + wr, ok = ws.Pop() + if !ok { + t.Fatalf("Pop(st1)=false, want true") + } + if got, want := wr.StreamID(), uint32(1); got != want { + t.Fatalf("Pop(st1)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 4096; got != want { + t.Fatalf("Pop(st1)=%d bytes, want %d bytes", got, want) + } + + // Should go back to writing 1KB from st2. 
+ wr, ok = ws.Pop() + if !ok { + t.Fatalf("Pop(st2.last)=false, want true") + } + if got, want := wr.StreamID(), uint32(2); got != want { + t.Fatalf("Pop(st2.last)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 1024; got != want { + t.Fatalf("Pop(st2.last)=%d bytes, want %d bytes", got, want) + } +} + +func TestPriorityWeights(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + + sc := &serverConn{maxFrameSize: 8} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + st1.flow.add(40) + st2.flow.add(40) + + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 40), false}, st1, nil}) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 40), false}, st2, nil}) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 34}) + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 9}) + + // st1 gets 3.5x the bandwidth of st2 (3.5 = (34+1)/(9+1)). + // The maximum frame size is 8 bytes. The write sequence should be: + // st1, total bytes so far is (st1=8, st=0) + // st2, total bytes so far is (st1=8, st=8) + // st1, total bytes so far is (st1=16, st=8) + // st1, total bytes so far is (st1=24, st=8) // 3x bandwidth + // st1, total bytes so far is (st1=32, st=8) // 4x bandwidth + // st2, total bytes so far is (st1=32, st=16) // 2x bandwidth + // st1, total bytes so far is (st1=40, st=16) + // st2, total bytes so far is (st1=40, st=24) + // st2, total bytes so far is (st1=40, st=32) + // st2, total bytes so far is (st1=40, st=40) + if err := checkPopAll(ws, []uint32{1, 2, 1, 1, 1, 2, 1, 2, 2, 2}); err != nil { + t.Error(err) + } +} + +func TestPriorityRstStreamOnNonOpenStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 0, + MaxIdleNodesInTree: 0, + }) + ws.OpenStream(1, OpenStreamOptions{}) + ws.CloseStream(1) + ws.Push(FrameWriteRequest{write: streamError(1, ErrCodeProtocol)}) + ws.Push(FrameWriteRequest{write: streamError(2, ErrCodeProtocol)}) + + if err := checkPopAll(ws, []uint32{1, 2}); err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go new file mode 100644 index 000000000..36d7919f1 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "math" + +// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 +// priorities. Control frames like SETTINGS and PING are written before DATA +// frames, but if no control frames are queued and multiple streams have queued +// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. +func NewRandomWriteScheduler() WriteScheduler { + return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} +} + +type randomWriteScheduler struct { + // zero are frames not associated with a specific stream. + zero writeQueue + + // sq contains the stream-specific queues, keyed by stream ID. + // When a stream is idle or closed, it's deleted from the map. + sq map[uint32]*writeQueue + + // pool of empty queues for reuse. 
+ queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. + for _, q := range ws.sq { + if wr, ok := q.consume(math.MaxInt32); ok { + return wr, true + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/http2/writesched_random_test.go b/vendor/golang.org/x/net/http2/writesched_random_test.go new file mode 100644 index 000000000..3bf4aa36a --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random_test.go @@ -0,0 +1,44 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "testing" + +func TestRandomScheduler(t *testing.T) { + ws := NewRandomWriteScheduler() + ws.Push(makeWriteHeadersRequest(3)) + ws.Push(makeWriteHeadersRequest(4)) + ws.Push(makeWriteHeadersRequest(1)) + ws.Push(makeWriteHeadersRequest(2)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteNonStreamRequest()) + + // Pop all frames. Should get the non-stream requests first, + // followed by the stream requests in any order. + var order []FrameWriteRequest + for { + wr, ok := ws.Pop() + if !ok { + break + } + order = append(order, wr) + } + t.Logf("got frames: %v", order) + if len(order) != 6 { + t.Fatalf("got %d frames, expected 6", len(order)) + } + if order[0].StreamID() != 0 || order[1].StreamID() != 0 { + t.Fatal("expected non-stream frames first", order[0], order[1]) + } + got := make(map[uint32]bool) + for _, wr := range order[2:] { + got[wr.StreamID()] = true + } + for id := uint32(1); id <= 4; id++ { + if !got[id] { + t.Errorf("frame not found for stream %d", id) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched_test.go b/vendor/golang.org/x/net/http2/writesched_test.go new file mode 100644 index 000000000..0807056bc --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_test.go @@ -0,0 +1,125 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http2 + +import ( + "fmt" + "math" + "reflect" + "testing" +) + +func makeWriteNonStreamRequest() FrameWriteRequest { + return FrameWriteRequest{writeSettingsAck{}, nil, nil} +} + +func makeWriteHeadersRequest(streamID uint32) FrameWriteRequest { + st := &stream{id: streamID} + return FrameWriteRequest{&writeResHeaders{streamID: streamID, httpResCode: 200}, st, nil} +} + +func checkConsume(wr FrameWriteRequest, nbytes int32, want []FrameWriteRequest) error { + consumed, rest, n := wr.Consume(nbytes) + var wantConsumed, wantRest FrameWriteRequest + switch len(want) { + case 0: + case 1: + wantConsumed = want[0] + case 2: + wantConsumed = want[0] + wantRest = want[1] + } + if !reflect.DeepEqual(consumed, wantConsumed) || !reflect.DeepEqual(rest, wantRest) || n != len(want) { + return fmt.Errorf("got %v, %v, %v\nwant %v, %v, %v", consumed, rest, n, wantConsumed, wantRest, len(want)) + } + return nil +} + +func TestFrameWriteRequestNonData(t *testing.T) { + wr := makeWriteNonStreamRequest() + if got, want := wr.DataSize(), 0; got != want { + t.Errorf("DataSize: got %v, want %v", got, want) + } + + // Non-DATA frames are always consumed whole. + if err := checkConsume(wr, 0, []FrameWriteRequest{wr}); err != nil { + t.Errorf("Consume:\n%v", err) + } +} + +func TestFrameWriteRequestData(t *testing.T) { + st := &stream{ + id: 1, + sc: &serverConn{maxFrameSize: 16}, + } + const size = 32 + wr := FrameWriteRequest{&writeData{st.id, make([]byte, size), true}, st, make(chan error)} + if got, want := wr.DataSize(), size; got != want { + t.Errorf("DataSize: got %v, want %v", got, want) + } + + // No flow-control bytes available: cannot consume anything. + if err := checkConsume(wr, math.MaxInt32, []FrameWriteRequest{}); err != nil { + t.Errorf("Consume(limited by flow control):\n%v", err) + } + + // Add enough flow-control bytes to consume the entire frame, + // but we're now restricted by st.sc.maxFrameSize. + st.flow.add(size) + want := []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, st.sc.maxFrameSize), false}, + stream: st, + done: nil, + }, + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(wr, math.MaxInt32, want); err != nil { + t.Errorf("Consume(limited by maxFrameSize):\n%v", err) + } + rest := want[1] + + // Consume 8 bytes from the remaining frame. + want = []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, 8), false}, + stream: st, + done: nil, + }, + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(rest, 8, want); err != nil { + t.Errorf("Consume(8):\n%v", err) + } + rest = want[1] + + // Consume all remaining bytes. 
+ want = []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(rest, math.MaxInt32, want); err != nil { + t.Errorf("Consume(remainder):\n%v", err) + } +} + +func TestFrameWriteRequest_StreamID(t *testing.T) { + const streamID = 123 + wr := FrameWriteRequest{write: streamError(streamID, ErrCodeNo)} + if got := wr.StreamID(); got != streamID { + t.Errorf("FrameWriteRequest(StreamError) = %v; want %v", got, streamID) + } +} diff --git a/vendor/golang.org/x/net/http2/z_spec_test.go b/vendor/golang.org/x/net/http2/z_spec_test.go new file mode 100644 index 000000000..610b2cdbc --- /dev/null +++ b/vendor/golang.org/x/net/http2/z_spec_test.go @@ -0,0 +1,356 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "encoding/xml" + "flag" + "fmt" + "io" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "testing" +) + +var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests") + +// The global map of sentence coverage for the http2 spec. +var defaultSpecCoverage specCoverage + +var loadSpecOnce sync.Once + +func loadSpec() { + if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil { + panic(err) + } else { + defaultSpecCoverage = readSpecCov(f) + f.Close() + } +} + +// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not +// "covered" will be included in report outputted by TestSpecCoverage. +func covers(sec, sentences string) { + loadSpecOnce.Do(loadSpec) + defaultSpecCoverage.cover(sec, sentences) +} + +type specPart struct { + section string + sentence string +} + +func (ss specPart) Less(oo specPart) bool { + atoi := func(s string) int { + n, err := strconv.Atoi(s) + if err != nil { + panic(err) + } + return n + } + a := strings.Split(ss.section, ".") + b := strings.Split(oo.section, ".") + for len(a) > 0 { + if len(b) == 0 { + return false + } + x, y := atoi(a[0]), atoi(b[0]) + if x == y { + a, b = a[1:], b[1:] + continue + } + return x < y + } + if len(b) > 0 { + return true + } + return false +} + +type bySpecSection []specPart + +func (a bySpecSection) Len() int { return len(a) } +func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) } +func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type specCoverage struct { + coverage map[specPart]bool + d *xml.Decoder +} + +func joinSection(sec []int) string { + s := fmt.Sprintf("%d", sec[0]) + for _, n := range sec[1:] { + s = fmt.Sprintf("%s.%d", s, n) + } + return s +} + +func (sc specCoverage) readSection(sec []int) { + var ( + buf = new(bytes.Buffer) + sub = 0 + ) + for { + tk, err := sc.d.Token() + if err != nil { + if err == io.EOF { + return + } + panic(err) + } + switch v := tk.(type) { + case xml.StartElement: + if skipElement(v) { + if err := sc.d.Skip(); err != nil { + panic(err) + } + if v.Name.Local == "section" { + sub++ + } + break + } + switch v.Name.Local { + case "section": + sub++ + sc.readSection(append(sec, sub)) + case "xref": + buf.Write(sc.readXRef(v)) + } + case xml.CharData: + if len(sec) == 0 { + break + } + buf.Write(v) + case xml.EndElement: + if v.Name.Local == "section" { + sc.addSentences(joinSection(sec), buf.String()) + return + } + } + } +} + +func (sc specCoverage) readXRef(se xml.StartElement) []byte { + var b []byte + for { + tk, err := 
sc.d.Token() + if err != nil { + panic(err) + } + switch v := tk.(type) { + case xml.CharData: + if b != nil { + panic("unexpected CharData") + } + b = []byte(string(v)) + case xml.EndElement: + if v.Name.Local != "xref" { + panic("expected ") + } + if b != nil { + return b + } + sig := attrSig(se) + switch sig { + case "target": + return []byte(fmt.Sprintf("[%s]", attrValue(se, "target"))) + case "fmt-of,rel,target", "fmt-,,rel,target": + return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel"))) + case "fmt-of,sec,target", "fmt-,,sec,target": + return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target"))) + case "fmt-of,rel,sec,target": + return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel"))) + default: + panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se))) + } + default: + panic(fmt.Sprintf("unexpected tag %q", v)) + } + } +} + +var skipAnchor = map[string]bool{ + "intro": true, + "Overview": true, +} + +var skipTitle = map[string]bool{ + "Acknowledgements": true, + "Change Log": true, + "Document Organization": true, + "Conventions and Terminology": true, +} + +func skipElement(s xml.StartElement) bool { + switch s.Name.Local { + case "artwork": + return true + case "section": + for _, attr := range s.Attr { + switch attr.Name.Local { + case "anchor": + if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") { + return true + } + case "title": + if skipTitle[attr.Value] { + return true + } + } + } + } + return false +} + +func readSpecCov(r io.Reader) specCoverage { + sc := specCoverage{ + coverage: map[specPart]bool{}, + d: xml.NewDecoder(r)} + sc.readSection(nil) + return sc +} + +func (sc specCoverage) addSentences(sec string, sentence string) { + for _, s := range parseSentences(sentence) { + sc.coverage[specPart{sec, s}] = false + } +} + +func (sc specCoverage) cover(sec string, sentence string) { + for _, s := range parseSentences(sentence) { + p := specPart{sec, s} + if _, ok := sc.coverage[p]; !ok { + panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s)) + } + sc.coverage[specPart{sec, s}] = true + } + +} + +var whitespaceRx = regexp.MustCompile(`\s+`) + +func parseSentences(sens string) []string { + sens = strings.TrimSpace(sens) + if sens == "" { + return nil + } + ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ") + for i, s := range ss { + s = strings.TrimSpace(s) + if !strings.HasSuffix(s, ".") { + s += "." + } + ss[i] = s + } + return ss +} + +func TestSpecParseSentences(t *testing.T) { + tests := []struct { + ss string + want []string + }{ + {"Sentence 1. Sentence 2.", + []string{ + "Sentence 1.", + "Sentence 2.", + }}, + {"Sentence 1. 
\nSentence 2.\tSentence 3.", + []string{ + "Sentence 1.", + "Sentence 2.", + "Sentence 3.", + }}, + } + + for i, tt := range tests { + got := parseSentences(tt.ss) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d: got = %q, want %q", i, got, tt.want) + } + } +} + +func TestSpecCoverage(t *testing.T) { + if !*coverSpec { + t.Skip() + } + + loadSpecOnce.Do(loadSpec) + + var ( + list []specPart + cv = defaultSpecCoverage.coverage + total = len(cv) + complete = 0 + ) + + for sp, touched := range defaultSpecCoverage.coverage { + if touched { + complete++ + } else { + list = append(list, sp) + } + } + sort.Stable(bySpecSection(list)) + + if testing.Short() && len(list) > 5 { + list = list[:5] + } + + for _, p := range list { + t.Errorf("\tSECTION %s: %s", p.section, p.sentence) + } + + t.Logf("%d/%d (%d%%) sentences covered", complete, total, (complete/total)*100) +} + +func attrSig(se xml.StartElement) string { + var names []string + for _, attr := range se.Attr { + if attr.Name.Local == "fmt" { + names = append(names, "fmt-"+attr.Value) + } else { + names = append(names, attr.Name.Local) + } + } + sort.Strings(names) + return strings.Join(names, ",") +} + +func attrValue(se xml.StartElement, attr string) string { + for _, a := range se.Attr { + if a.Name.Local == attr { + return a.Value + } + } + panic("unknown attribute " + attr) +} + +func TestSpecPartLess(t *testing.T) { + tests := []struct { + sec1, sec2 string + want bool + }{ + {"6.2.1", "6.2", false}, + {"6.2", "6.2.1", true}, + {"6.10", "6.10.1", true}, + {"6.10", "6.1.1", false}, // 10, not 1 + {"6.1", "6.1", false}, // equal, so not less + } + for _, tt := range tests { + got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"}) + if got != tt.want { + t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/dstunreach.go b/vendor/golang.org/x/net/icmp/dstunreach.go new file mode 100644 index 000000000..75db991df --- /dev/null +++ b/vendor/golang.org/x/net/icmp/dstunreach.go @@ -0,0 +1,41 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A DstUnreach represents an ICMP destination unreachable message +// body. +type DstUnreach struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *DstUnreach) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DstUnreach) Marshal(proto int) ([]byte, error) { + return marshalMultipartMessageBody(proto, p.Data, p.Extensions) +} + +// parseDstUnreach parses b as an ICMP destination unreachable message +// body. +func parseDstUnreach(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &DstUnreach{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/echo.go b/vendor/golang.org/x/net/icmp/echo.go new file mode 100644 index 000000000..e6f15efd7 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/echo.go @@ -0,0 +1,45 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// An Echo represents an ICMP echo request or reply message body. +type Echo struct { + ID int // identifier + Seq int // sequence number + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *Echo) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *Echo) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq)) + copy(b[4:], p.Data) + return b, nil +} + +// parseEcho parses b as an ICMP echo request or reply message body. +func parseEcho(proto int, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/endpoint.go b/vendor/golang.org/x/net/icmp/endpoint.go new file mode 100644 index 000000000..a68bfb010 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/endpoint.go @@ -0,0 +1,113 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + "runtime" + "syscall" + "time" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var _ net.PacketConn = &PacketConn{} + +// A PacketConn represents a packet network endpoint that uses either +// ICMPv4 or ICMPv6. +type PacketConn struct { + c net.PacketConn + p4 *ipv4.PacketConn + p6 *ipv6.PacketConn +} + +func (c *PacketConn) ok() bool { return c != nil && c.c != nil } + +// IPv4PacketConn returns the ipv4.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv4. +func (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn { + if !c.ok() { + return nil + } + return c.p4 +} + +// IPv6PacketConn returns the ipv6.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv6. +func (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn { + if !c.ok() { + return nil + } + return c.p6 +} + +// ReadFrom reads an ICMP message from the connection. +func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) { + if !c.ok() { + return 0, nil, syscall.EINVAL + } + // Please be informed that ipv4.NewPacketConn enables + // IP_STRIPHDR option by default on Darwin. + // See golang.org/issue/9395 for further information. + if runtime.GOOS == "darwin" && c.p4 != nil { + n, _, peer, err := c.p4.ReadFrom(b) + return n, peer, err + } + return c.c.ReadFrom(b) +} + +// WriteTo writes the ICMP message b to dst. +// Dst must be net.UDPAddr when c is a non-privileged +// datagram-oriented ICMP endpoint. Otherwise it must be net.IPAddr. +func (c *PacketConn) WriteTo(b []byte, dst net.Addr) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.c.WriteTo(b, dst) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.Close() +} + +// LocalAddr returns the local network address. 
+func (c *PacketConn) LocalAddr() net.Addr { + if !c.ok() { + return nil + } + return c.c.LocalAddr() +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetWriteDeadline(t) +} diff --git a/vendor/golang.org/x/net/icmp/example_test.go b/vendor/golang.org/x/net/icmp/example_test.go new file mode 100644 index 000000000..1df4ceccd --- /dev/null +++ b/vendor/golang.org/x/net/icmp/example_test.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "log" + "net" + "os" + "runtime" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" +) + +func ExamplePacketConn_nonPrivilegedPing() { + switch runtime.GOOS { + case "darwin": + case "linux": + log.Println("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + log.Println("not supported on", runtime.GOOS) + return + } + + c, err := icmp.ListenPacket("udp6", "fe80::1%en0") + if err != nil { + log.Fatal(err) + } + defer c.Close() + + wm := icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: 1, + Data: []byte("HELLO-R-U-THERE"), + }, + } + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + if _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP("ff02::1"), Zone: "en0"}); err != nil { + log.Fatal(err) + } + + rb := make([]byte, 1500) + n, peer, err := c.ReadFrom(rb) + if err != nil { + log.Fatal(err) + } + rm, err := icmp.ParseMessage(58, rb[:n]) + if err != nil { + log.Fatal(err) + } + switch rm.Type { + case ipv6.ICMPTypeEchoReply: + log.Printf("got reflection from %v", peer) + default: + log.Printf("got %+v; want echo reply", rm) + } +} diff --git a/vendor/golang.org/x/net/icmp/extension.go b/vendor/golang.org/x/net/icmp/extension.go new file mode 100644 index 000000000..402a7514b --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// An Extension represents an ICMP extension. +type Extension interface { + // Len returns the length of ICMP extension. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP extension. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Marshal(proto int) ([]byte, error) +} + +const extensionVersion = 2 + +func validExtensionHeader(b []byte) bool { + v := int(b[0]&0xf0) >> 4 + s := binary.BigEndian.Uint16(b[2:4]) + if s != 0 { + s = checksum(b) + } + if v != extensionVersion || s != 0 { + return false + } + return true +} + +// parseExtensions parses b as a list of ICMP extensions. 
+// The length attribute l must be the length attribute field in +// received icmp messages. +// +// It will return a list of ICMP extensions and an adjusted length +// attribute that represents the length of the padded original +// datagram field. Otherwise, it returns an error. +func parseExtensions(b []byte, l int) ([]Extension, int, error) { + // Still a lot of non-RFC 4884 compliant implementations are + // out there. Set the length attribute l to 128 when it looks + // inappropriate for backwards compatibility. + // + // A minimal extension at least requires 8 octets; 4 octets + // for an extension header, and 4 octets for a single object + // header. + // + // See RFC 4884 for further information. + if 128 > l || l+8 > len(b) { + l = 128 + } + if l+8 > len(b) { + return nil, -1, errNoExtension + } + if !validExtensionHeader(b[l:]) { + if l == 128 { + return nil, -1, errNoExtension + } + l = 128 + if !validExtensionHeader(b[l:]) { + return nil, -1, errNoExtension + } + } + var exts []Extension + for b = b[l+4:]; len(b) >= 4; { + ol := int(binary.BigEndian.Uint16(b[:2])) + if 4 > ol || ol > len(b) { + break + } + switch b[2] { + case classMPLSLabelStack: + ext, err := parseMPLSLabelStack(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + case classInterfaceInfo: + ext, err := parseInterfaceInfo(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + } + b = b[ol:] + } + return exts, l, nil +} diff --git a/vendor/golang.org/x/net/icmp/extension_test.go b/vendor/golang.org/x/net/icmp/extension_test.go new file mode 100644 index 000000000..0b3f7b9e1 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension_test.go @@ -0,0 +1,259 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp + +import ( + "net" + "reflect" + "testing" + + "golang.org/x/net/internal/iana" +) + +var marshalAndParseExtensionTests = []struct { + proto int + hdr []byte + obj []byte + exts []Extension +}{ + // MPLS label stack with no label + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x01, 0x01, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + }, + }, + }, + // MPLS label stack with a single label + { + proto: iana.ProtocolIPv6ICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x08, 0x01, 0x01, + 0x03, 0xe8, 0xe9, 0xff, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + // MPLS label stack with multiple labels + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x0c, 0x01, 0x01, + 0x03, 0xe8, 0xde, 0xfe, + 0x03, 0xe8, 0xe1, 0xff, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16013, + TC: 0x7, + S: false, + TTL: 254, + }, + { + Label: 16014, + TC: 0, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + // Interface information with no attribute + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x02, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + }, + }, + }, + // Interface information with ifIndex and name + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x10, 0x02, 0x0a, + 0x00, 0x00, 0x00, 0x10, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0a, + Interface: &net.Interface{ + Index: 16, + Name: "en101", + }, + }, + }, + }, + // Interface information with ifIndex, IPAddr, name and MTU + { + proto: iana.ProtocolIPv6ICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x28, 0x02, 0x0f, + 0x00, 0x00, 0x00, 0x0f, + 0x00, 0x02, 0x00, 0x00, + 0xfe, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + }, +} + +func TestMarshalAndParseExtension(t *testing.T) { + for i, tt := range marshalAndParseExtensionTests { + for j, ext := range tt.exts { + var err error + var b []byte + switch ext := ext.(type) { + case *MPLSLabelStack: + b, err = ext.Marshal(tt.proto) + if err != nil { + t.Errorf("#%v/%v: %v", i, j, err) + continue + } + case *InterfaceInfo: + b, err = ext.Marshal(tt.proto) + if err != nil { + t.Errorf("#%v/%v: %v", i, j, err) + continue + } + } + if !reflect.DeepEqual(b, tt.obj) { + t.Errorf("#%v/%v: got %#v; want %#v", i, j, b, tt.obj) + continue + } + } + + for j, wire := range []struct { + data []byte // original datagram + inlattr int // length of padded original datagram, a hint + outlattr int // length of padded original datagram, a want + err error + }{ + {nil, 0, 
-1, errNoExtension}, + {make([]byte, 127), 128, -1, errNoExtension}, + + {make([]byte, 128), 127, -1, errNoExtension}, + {make([]byte, 128), 128, -1, errNoExtension}, + {make([]byte, 128), 129, -1, errNoExtension}, + + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 127, 128, nil}, + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 128, 128, nil}, + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 129, 128, nil}, + + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 511, -1, errNoExtension}, + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 512, 512, nil}, + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 513, -1, errNoExtension}, + } { + exts, l, err := parseExtensions(wire.data, wire.inlattr) + if err != wire.err { + t.Errorf("#%v/%v: got %v; want %v", i, j, err, wire.err) + continue + } + if wire.err != nil { + continue + } + if l != wire.outlattr { + t.Errorf("#%v/%v: got %v; want %v", i, j, l, wire.outlattr) + } + if !reflect.DeepEqual(exts, tt.exts) { + for j, ext := range exts { + switch ext := ext.(type) { + case *MPLSLabelStack: + want := tt.exts[j].(*MPLSLabelStack) + t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) + case *InterfaceInfo: + want := tt.exts[j].(*InterfaceInfo) + t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) + } + } + continue + } + } + } +} + +var parseInterfaceNameTests = []struct { + b []byte + error +}{ + {[]byte{0, 'e', 'n', '0'}, errInvalidExtension}, + {[]byte{4, 'e', 'n', '0'}, nil}, + {[]byte{7, 'e', 'n', '0', 0xff, 0xff, 0xff, 0xff}, errInvalidExtension}, + {[]byte{8, 'e', 'n', '0', 0xff, 0xff, 0xff}, errMessageTooShort}, +} + +func TestParseInterfaceName(t *testing.T) { + ifi := InterfaceInfo{Interface: &net.Interface{}} + for i, tt := range parseInterfaceNameTests { + if _, err := ifi.parseName(tt.b); err != tt.error { + t.Errorf("#%d: got %v; want %v", i, err, tt.error) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/helper_posix.go b/vendor/golang.org/x/net/icmp/helper_posix.go new file mode 100644 index 000000000..398fd388f --- /dev/null +++ b/vendor/golang.org/x/net/icmp/helper_posix.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "strconv" + "syscall" +) + +func sockaddr(family int, address string) (syscall.Sockaddr, error) { + switch family { + case syscall.AF_INET: + a, err := net.ResolveIPAddr("ip4", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv4zero + } + if a.IP = a.IP.To4(); a.IP == nil { + return nil, net.InvalidAddrError("non-ipv4 address") + } + sa := &syscall.SockaddrInet4{} + copy(sa.Addr[:], a.IP) + return sa, nil + case syscall.AF_INET6: + a, err := net.ResolveIPAddr("ip6", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv6unspecified + } + if a.IP.Equal(net.IPv4zero) { + a.IP = net.IPv6unspecified + } + if a.IP = a.IP.To16(); a.IP == nil || a.IP.To4() != nil { + return nil, net.InvalidAddrError("non-ipv6 address") + } + sa := &syscall.SockaddrInet6{ZoneId: zoneToUint32(a.Zone)} + copy(sa.Addr[:], a.IP) + return sa, nil + default: + return nil, net.InvalidAddrError("unexpected family") + } +} + +func zoneToUint32(zone string) uint32 { + if zone == "" { + return 0 + } + if ifi, err := net.InterfaceByName(zone); err == nil { + return uint32(ifi.Index) + } + n, err := strconv.Atoi(zone) + if err != nil { + return 0 + } + return uint32(n) +} + +func last(s string, b byte) int { + i := len(s) + for i--; i >= 0; i-- { + if s[i] == b { + break + } + } + return i +} diff --git a/vendor/golang.org/x/net/icmp/interface.go b/vendor/golang.org/x/net/icmp/interface.go new file mode 100644 index 000000000..78b5b98bf --- /dev/null +++ b/vendor/golang.org/x/net/icmp/interface.go @@ -0,0 +1,236 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "strings" + + "golang.org/x/net/internal/iana" +) + +const ( + classInterfaceInfo = 2 + + afiIPv4 = 1 + afiIPv6 = 2 +) + +const ( + attrMTU = 1 << iota + attrName + attrIPAddr + attrIfIndex +) + +// An InterfaceInfo represents interface and next-hop identification. +type InterfaceInfo struct { + Class int // extension object class number + Type int // extension object sub-type + Interface *net.Interface + Addr *net.IPAddr +} + +func (ifi *InterfaceInfo) nameLen() int { + if len(ifi.Interface.Name) > 63 { + return 64 + } + l := 1 + len(ifi.Interface.Name) + return (l + 3) &^ 3 +} + +func (ifi *InterfaceInfo) attrsAndLen(proto int) (attrs, l int) { + l = 4 + if ifi.Interface != nil && ifi.Interface.Index > 0 { + attrs |= attrIfIndex + l += 4 + if len(ifi.Interface.Name) > 0 { + attrs |= attrName + l += ifi.nameLen() + } + if ifi.Interface.MTU > 0 { + attrs |= attrMTU + l += 4 + } + } + if ifi.Addr != nil { + switch proto { + case iana.ProtocolICMP: + if ifi.Addr.IP.To4() != nil { + attrs |= attrIPAddr + l += 4 + net.IPv4len + } + case iana.ProtocolIPv6ICMP: + if ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + attrs |= attrIPAddr + l += 4 + net.IPv6len + } + } + } + return +} + +// Len implements the Len method of Extension interface. +func (ifi *InterfaceInfo) Len(proto int) int { + _, l := ifi.attrsAndLen(proto) + return l +} + +// Marshal implements the Marshal method of Extension interface. 
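+//
+// The returned slice starts with the 4-byte extension object header
+// (2-octet length, class, sub-type) followed by whichever attributes
+// are present, encoded in the order: interface index, IP address,
+// interface name, MTU.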
+func (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) { + attrs, l := ifi.attrsAndLen(proto) + b := make([]byte, l) + if err := ifi.marshal(proto, b, attrs, l); err != nil { + return nil, err + } + return b, nil +} + +func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error { + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classInterfaceInfo, byte(ifi.Type) + for b = b[4:]; len(b) > 0 && attrs != 0; { + switch { + case attrs&attrIfIndex != 0: + b = ifi.marshalIfIndex(proto, b) + attrs &^= attrIfIndex + case attrs&attrIPAddr != 0: + b = ifi.marshalIPAddr(proto, b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b = ifi.marshalName(proto, b) + attrs &^= attrName + case attrs&attrMTU != 0: + b = ifi.marshalMTU(proto, b) + attrs &^= attrMTU + } + } + return nil +} + +func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte { + switch proto { + case iana.ProtocolICMP: + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv4)) + copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4()) + b = b[4+net.IPv4len:] + case iana.ProtocolIPv6ICMP: + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv6)) + copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16()) + b = b[4+net.IPv6len:] + } + return b +} + +func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + afi := int(binary.BigEndian.Uint16(b[:2])) + b = b[4:] + switch afi { + case afiIPv4: + if len(b) < net.IPv4len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv4len) + copy(ifi.Addr.IP, b[:net.IPv4len]) + b = b[net.IPv4len:] + case afiIPv6: + if len(b) < net.IPv6len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv6len) + copy(ifi.Addr.IP, b[:net.IPv6len]) + b = b[net.IPv6len:] + } + return b, nil +} + +func (ifi *InterfaceInfo) marshalName(proto int, b []byte) []byte { + l := byte(ifi.nameLen()) + b[0] = l + copy(b[1:], []byte(ifi.Interface.Name)) + return b[l:] +} + +func (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) { + if 4 > len(b) || len(b) < int(b[0]) { + return nil, errMessageTooShort + } + l := int(b[0]) + if l%4 != 0 || 4 > l || l > 64 { + return nil, errInvalidExtension + } + var name [63]byte + copy(name[:], b[1:l]) + ifi.Interface.Name = strings.Trim(string(name[:]), "\000") + return b[l:], nil +} + +func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func parseInterfaceInfo(b []byte) (Extension, error) { + ifi := &InterfaceInfo{ + Class: int(b[2]), + Type: int(b[3]), + } + if ifi.Type&(attrIfIndex|attrName|attrMTU) != 0 { + ifi.Interface = &net.Interface{} + } + if ifi.Type&attrIPAddr != 0 { + ifi.Addr = &net.IPAddr{} + } + attrs := ifi.Type & (attrIfIndex | attrIPAddr | attrName | attrMTU) + for b = b[4:]; len(b) > 0 && attrs != 0; { + var err error + switch { + case attrs&attrIfIndex != 0: + b, err = ifi.parseIfIndex(b) + attrs &^= 
attrIfIndex + case attrs&attrIPAddr != 0: + b, err = ifi.parseIPAddr(b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b, err = ifi.parseName(b) + attrs &^= attrName + case attrs&attrMTU != 0: + b, err = ifi.parseMTU(b) + attrs &^= attrMTU + } + if err != nil { + return nil, err + } + } + if ifi.Interface != nil && ifi.Interface.Name != "" && ifi.Addr != nil && ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + ifi.Addr.Zone = ifi.Interface.Name + } + return ifi, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4.go b/vendor/golang.org/x/net/icmp/ipv4.go new file mode 100644 index 000000000..ffc66ed4d --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4.go @@ -0,0 +1,61 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "runtime" + + "golang.org/x/net/internal/socket" + "golang.org/x/net/ipv4" +) + +// freebsdVersion is set in sys_freebsd.go. +// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. +var freebsdVersion uint32 + +// ParseIPv4Header parses b as an IPv4 header of ICMP error message +// invoking packet, which is contained in ICMP error message. +func ParseIPv4Header(b []byte) (*ipv4.Header, error) { + if len(b) < ipv4.HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return nil, errBufferTooShort + } + h := &ipv4.Header{ + Version: int(b[0] >> 4), + Len: hdrlen, + TOS: int(b[1]), + ID: int(binary.BigEndian.Uint16(b[4:6])), + FragOff: int(binary.BigEndian.Uint16(b[6:8])), + TTL: int(b[8]), + Protocol: int(b[9]), + Checksum: int(binary.BigEndian.Uint16(b[10:12])), + Src: net.IPv4(b[12], b[13], b[14], b[15]), + Dst: net.IPv4(b[16], b[17], b[18], b[19]), + } + switch runtime.GOOS { + case "darwin": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + case "freebsd": + if freebsdVersion >= 1000000 { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } else { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } + h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + if hdrlen-ipv4.HeaderLen > 0 { + h.Options = make([]byte, hdrlen-ipv4.HeaderLen) + copy(h.Options, b[ipv4.HeaderLen:]) + } + return h, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4_test.go b/vendor/golang.org/x/net/icmp/ipv4_test.go new file mode 100644 index 000000000..058953f43 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4_test.go @@ -0,0 +1,83 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/socket" + "golang.org/x/net/ipv4" +) + +type ipv4HeaderTest struct { + wireHeaderFromKernel [ipv4.HeaderLen]byte + wireHeaderFromTradBSDKernel [ipv4.HeaderLen]byte + Header *ipv4.Header +} + +var ipv4HeaderLittleEndianTest = ipv4HeaderTest{ + // TODO(mikio): Add platform dependent wire header formats when + // we support new platforms. 
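+ //
+ // wireHeaderFromTradBSDKernel mirrors kernels (darwin, and freebsd
+ // before 10.0) that supply the total length field in host
+ // (little-endian) byte order; see ParseIPv4Header.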
+ wireHeaderFromKernel: [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromTradBSDKernel: [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + Header: &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: ipv4.DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + }, +} + +func TestParseIPv4Header(t *testing.T) { + tt := &ipv4HeaderLittleEndianTest + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for non-little endian machine yet") + } + + var wh []byte + switch runtime.GOOS { + case "darwin": + wh = tt.wireHeaderFromTradBSDKernel[:] + case "freebsd": + if freebsdVersion >= 1000000 { + wh = tt.wireHeaderFromKernel[:] + } else { + wh = tt.wireHeaderFromTradBSDKernel[:] + } + default: + wh = tt.wireHeaderFromKernel[:] + } + h, err := ParseIPv4Header(wh) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, tt.Header) { + t.Fatalf("got %#v; want %#v", h, tt.Header) + } +} diff --git a/vendor/golang.org/x/net/icmp/ipv6.go b/vendor/golang.org/x/net/icmp/ipv6.go new file mode 100644 index 000000000..2e8cfeb13 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv6.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + + "golang.org/x/net/internal/iana" +) + +const ipv6PseudoHeaderLen = 2*net.IPv6len + 8 + +// IPv6PseudoHeader returns an IPv6 pseudo header for checksum +// calculation. +func IPv6PseudoHeader(src, dst net.IP) []byte { + b := make([]byte, ipv6PseudoHeaderLen) + copy(b, src.To16()) + copy(b[net.IPv6len:], dst.To16()) + b[len(b)-1] = byte(iana.ProtocolIPv6ICMP) + return b +} diff --git a/vendor/golang.org/x/net/icmp/listen_posix.go b/vendor/golang.org/x/net/icmp/listen_posix.go new file mode 100644 index 000000000..7fac4f965 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_posix.go @@ -0,0 +1,100 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "os" + "runtime" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows to read, write a few +// limited ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. 
+// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + var family, proto int + switch network { + case "udp4": + family, proto = syscall.AF_INET, iana.ProtocolICMP + case "udp6": + family, proto = syscall.AF_INET6, iana.ProtocolIPv6ICMP + default: + i := last(network, ':') + switch network[:i] { + case "ip4": + proto = iana.ProtocolICMP + case "ip6": + proto = iana.ProtocolIPv6ICMP + } + } + var cerr error + var c net.PacketConn + switch family { + case syscall.AF_INET, syscall.AF_INET6: + s, err := syscall.Socket(family, syscall.SOCK_DGRAM, proto) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + if runtime.GOOS == "darwin" && family == syscall.AF_INET { + if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil { + syscall.Close(s) + return nil, os.NewSyscallError("setsockopt", err) + } + } + sa, err := sockaddr(family, address) + if err != nil { + syscall.Close(s) + return nil, err + } + if err := syscall.Bind(s, sa); err != nil { + syscall.Close(s) + return nil, os.NewSyscallError("bind", err) + } + f := os.NewFile(uintptr(s), "datagram-oriented icmp") + c, cerr = net.FilePacketConn(f) + f.Close() + default: + c, cerr = net.ListenPacket(network, address) + } + if cerr != nil { + return nil, cerr + } + switch proto { + case iana.ProtocolICMP: + return &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil + case iana.ProtocolIPv6ICMP: + return &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil + default: + return &PacketConn{c: c}, nil + } +} diff --git a/vendor/golang.org/x/net/icmp/listen_stub.go b/vendor/golang.org/x/net/icmp/listen_stub.go new file mode 100644 index 000000000..668728d17 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_stub.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package icmp + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows to read, write a few +// limited ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. +// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + return nil, errOpNoSupport +} diff --git a/vendor/golang.org/x/net/icmp/message.go b/vendor/golang.org/x/net/icmp/message.go new file mode 100644 index 000000000..81140b0df --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message.go @@ -0,0 +1,152 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package icmp provides basic functions for the manipulation of +// messages used in the Internet Control Message Protocols, +// ICMPv4 and ICMPv6. +// +// ICMPv4 and ICMPv6 are defined in RFC 792 and RFC 4443. +// Multi-part message support for ICMP is defined in RFC 4884. +// ICMP extensions for MPLS are defined in RFC 4950. +// ICMP extensions for interface and next-hop identification are +// defined in RFC 5837. +package icmp // import "golang.org/x/net/icmp" + +import ( + "encoding/binary" + "errors" + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. + +var ( + errMessageTooShort = errors.New("message too short") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errOpNoSupport = errors.New("operation not supported") + errNoExtension = errors.New("no extension") + errInvalidExtension = errors.New("invalid extension") +) + +func checksum(b []byte) uint16 { + csumcv := len(b) - 1 // checksum coverage + s := uint32(0) + for i := 0; i < csumcv; i += 2 { + s += uint32(b[i+1])<<8 | uint32(b[i]) + } + if csumcv&1 == 0 { + s += uint32(b[csumcv]) + } + s = s>>16 + s&0xffff + s = s + s>>16 + return ^uint16(s) +} + +// A Type represents an ICMP message type. +type Type interface { + Protocol() int +} + +// A Message represents an ICMP message. +type Message struct { + Type Type // type, either ipv4.ICMPType or ipv6.ICMPType + Code int // code + Checksum int // checksum + Body MessageBody // body +} + +// Marshal returns the binary encoding of the ICMP message m. +// +// For an ICMPv4 message, the returned message always contains the +// calculated checksum field. +// +// For an ICMPv6 message, the returned message contains the calculated +// checksum field when psh is not nil, otherwise the kernel will +// compute the checksum field during the message transmission. +// When psh is not nil, it must be the pseudo header for IPv6. +func (m *Message) Marshal(psh []byte) ([]byte, error) { + var mtype int + switch typ := m.Type.(type) { + case ipv4.ICMPType: + mtype = int(typ) + case ipv6.ICMPType: + mtype = int(typ) + default: + return nil, syscall.EINVAL + } + b := []byte{byte(mtype), byte(m.Code), 0, 0} + if m.Type.Protocol() == iana.ProtocolIPv6ICMP && psh != nil { + b = append(psh, b...) + } + if m.Body != nil && m.Body.Len(m.Type.Protocol()) != 0 { + mb, err := m.Body.Marshal(m.Type.Protocol()) + if err != nil { + return nil, err + } + b = append(b, mb...) + } + if m.Type.Protocol() == iana.ProtocolIPv6ICMP { + if psh == nil { // cannot calculate checksum here + return b, nil + } + off, l := 2*net.IPv6len, len(b)-len(psh) + binary.BigEndian.PutUint32(b[off:off+4], uint32(l)) + } + s := checksum(b) + // Place checksum back in header; using ^= avoids the + // assumption the checksum bytes are zero. 
+ b[len(psh)+2] ^= byte(s) + b[len(psh)+3] ^= byte(s >> 8) + return b[len(psh):], nil +} + +var parseFns = map[Type]func(int, []byte) (MessageBody, error){ + ipv4.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv4.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv4.ICMPTypeParameterProblem: parseParamProb, + + ipv4.ICMPTypeEcho: parseEcho, + ipv4.ICMPTypeEchoReply: parseEcho, + + ipv6.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv6.ICMPTypePacketTooBig: parsePacketTooBig, + ipv6.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv6.ICMPTypeParameterProblem: parseParamProb, + + ipv6.ICMPTypeEchoRequest: parseEcho, + ipv6.ICMPTypeEchoReply: parseEcho, +} + +// ParseMessage parses b as an ICMP message. +// Proto must be either the ICMPv4 or ICMPv6 protocol number. +func ParseMessage(proto int, b []byte) (*Message, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + var err error + m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))} + switch proto { + case iana.ProtocolICMP: + m.Type = ipv4.ICMPType(b[0]) + case iana.ProtocolIPv6ICMP: + m.Type = ipv6.ICMPType(b[0]) + default: + return nil, syscall.EINVAL + } + if fn, ok := parseFns[m.Type]; !ok { + m.Body, err = parseDefaultMessageBody(proto, b[4:]) + } else { + m.Body, err = fn(proto, b[4:]) + } + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/icmp/message_test.go b/vendor/golang.org/x/net/icmp/message_test.go new file mode 100644 index 000000000..5d2605f8d --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message_test.go @@ -0,0 +1,134 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "net" + "reflect" + "testing" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var marshalAndParseMessageForIPv4Tests = []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv4.ICMPTypePhoturis, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, +} + +func TestMarshalAndParseMessageForIPv4(t *testing.T) { + for i, tt := range marshalAndParseMessageForIPv4Tests { + b, err := tt.Marshal(nil) + if err != nil { + t.Fatal(err) + } + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + if !reflect.DeepEqual(m.Body, tt.Body) { + t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) + } + } +} + +var marshalAndParseMessageForIPv6Tests = []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypePacketTooBig, Code: 0, + Body: &icmp.PacketTooBig{ + MTU: 1<<16 - 1, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: 
[]byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv6.ICMPTypeDuplicateAddressConfirmation, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, +} + +func TestMarshalAndParseMessageForIPv6(t *testing.T) { + pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) + for i, tt := range marshalAndParseMessageForIPv6Tests { + for _, psh := range [][]byte{pshicmp, nil} { + b, err := tt.Marshal(psh) + if err != nil { + t.Fatal(err) + } + m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + if !reflect.DeepEqual(m.Body, tt.Body) { + t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) + } + } + } +} diff --git a/vendor/golang.org/x/net/icmp/messagebody.go b/vendor/golang.org/x/net/icmp/messagebody.go new file mode 100644 index 000000000..2463730ae --- /dev/null +++ b/vendor/golang.org/x/net/icmp/messagebody.go @@ -0,0 +1,41 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A MessageBody represents an ICMP message body. +type MessageBody interface { + // Len returns the length of ICMP message body. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP message body. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Marshal(proto int) ([]byte, error) +} + +// A DefaultMessageBody represents the default message body. +type DefaultMessageBody struct { + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *DefaultMessageBody) Len(proto int) int { + if p == nil { + return 0 + } + return len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DefaultMessageBody) Marshal(proto int) ([]byte, error) { + return p.Data, nil +} + +// parseDefaultMessageBody parses b as an ICMP message body. +func parseDefaultMessageBody(proto int, b []byte) (MessageBody, error) { + p := &DefaultMessageBody{Data: make([]byte, len(b))} + copy(p.Data, b) + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/mpls.go b/vendor/golang.org/x/net/icmp/mpls.go new file mode 100644 index 000000000..c31491748 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/mpls.go @@ -0,0 +1,77 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A MPLSLabel represents a MPLS label stack entry. +type MPLSLabel struct { + Label int // label value + TC int // traffic class; formerly experimental use + S bool // bottom of stack + TTL int // time to live +} + +const ( + classMPLSLabelStack = 1 + typeIncomingMPLSLabelStack = 1 +) + +// A MPLSLabelStack represents a MPLS label stack. +type MPLSLabelStack struct { + Class int // extension object class number + Type int // extension object sub-type + Labels []MPLSLabel +} + +// Len implements the Len method of Extension interface. 
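+// The result is the 4-byte extension object header plus 4 octets per
+// label stack entry.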
+func (ls *MPLSLabelStack) Len(proto int) int { + return 4 + (4 * len(ls.Labels)) +} + +// Marshal implements the Marshal method of Extension interface. +func (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) { + b := make([]byte, ls.Len(proto)) + if err := ls.marshal(proto, b); err != nil { + return nil, err + } + return b, nil +} + +func (ls *MPLSLabelStack) marshal(proto int, b []byte) error { + l := ls.Len(proto) + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack + off := 4 + for _, ll := range ls.Labels { + b[off], b[off+1], b[off+2] = byte(ll.Label>>12), byte(ll.Label>>4&0xff), byte(ll.Label<<4&0xf0) + b[off+2] |= byte(ll.TC << 1 & 0x0e) + if ll.S { + b[off+2] |= 0x1 + } + b[off+3] = byte(ll.TTL) + off += 4 + } + return nil +} + +func parseMPLSLabelStack(b []byte) (Extension, error) { + ls := &MPLSLabelStack{ + Class: int(b[2]), + Type: int(b[3]), + } + for b = b[4:]; len(b) >= 4; b = b[4:] { + ll := MPLSLabel{ + Label: int(b[0])<<12 | int(b[1])<<4 | int(b[2])>>4, + TC: int(b[2]&0x0e) >> 1, + TTL: int(b[3]), + } + if b[2]&0x1 != 0 { + ll.S = true + } + ls.Labels = append(ls.Labels, ll) + } + return ls, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart.go b/vendor/golang.org/x/net/icmp/multipart.go new file mode 100644 index 000000000..f27135660 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart.go @@ -0,0 +1,109 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "golang.org/x/net/internal/iana" + +// multipartMessageBodyDataLen takes b as an original datagram and +// exts as extensions, and returns a required length for message body +// and a required length for a padded original datagram in wire +// format. +func multipartMessageBodyDataLen(proto int, b []byte, exts []Extension) (bodyLen, dataLen int) { + for _, ext := range exts { + bodyLen += ext.Len(proto) + } + if bodyLen > 0 { + dataLen = multipartMessageOrigDatagramLen(proto, b) + bodyLen += 4 // length of extension header + } else { + dataLen = len(b) + } + bodyLen += dataLen + return bodyLen, dataLen +} + +// multipartMessageOrigDatagramLen takes b as an original datagram, +// and returns a required length for a padded orignal datagram in wire +// format. +func multipartMessageOrigDatagramLen(proto int, b []byte) int { + roundup := func(b []byte, align int) int { + // According to RFC 4884, the padded original datagram + // field must contain at least 128 octets. + if len(b) < 128 { + return 128 + } + r := len(b) + return (r + align - 1) & ^(align - 1) + } + switch proto { + case iana.ProtocolICMP: + return roundup(b, 4) + case iana.ProtocolIPv6ICMP: + return roundup(b, 8) + default: + return len(b) + } +} + +// marshalMultipartMessageBody takes data as an original datagram and +// exts as extesnsions, and returns a binary encoding of message body. +// It can be used for non-multipart message bodies when exts is nil. 
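+//
+// When exts is non-empty, the original datagram is padded to at least
+// 128 octets and to a 4-octet (ICMPv4) or 8-octet (ICMPv6) boundary,
+// the length field of the message body is set accordingly, and the
+// checksum of the extension structure is filled in, per RFC 4884.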
+func marshalMultipartMessageBody(proto int, data []byte, exts []Extension) ([]byte, error) { + bodyLen, dataLen := multipartMessageBodyDataLen(proto, data, exts) + b := make([]byte, 4+bodyLen) + copy(b[4:], data) + off := dataLen + 4 + if len(exts) > 0 { + b[dataLen+4] = byte(extensionVersion << 4) + off += 4 // length of object header + for _, ext := range exts { + switch ext := ext.(type) { + case *MPLSLabelStack: + if err := ext.marshal(proto, b[off:]); err != nil { + return nil, err + } + off += ext.Len(proto) + case *InterfaceInfo: + attrs, l := ext.attrsAndLen(proto) + if err := ext.marshal(proto, b[off:], attrs, l); err != nil { + return nil, err + } + off += ext.Len(proto) + } + } + s := checksum(b[dataLen+4:]) + b[dataLen+4+2] ^= byte(s) + b[dataLen+4+3] ^= byte(s >> 8) + switch proto { + case iana.ProtocolICMP: + b[1] = byte(dataLen / 4) + case iana.ProtocolIPv6ICMP: + b[0] = byte(dataLen / 8) + } + } + return b, nil +} + +// parseMultipartMessageBody parses b as either a non-multipart +// message body or a multipart message body. +func parseMultipartMessageBody(proto int, b []byte) ([]byte, []Extension, error) { + var l int + switch proto { + case iana.ProtocolICMP: + l = 4 * int(b[1]) + case iana.ProtocolIPv6ICMP: + l = 8 * int(b[0]) + } + if len(b) == 4 { + return nil, nil, nil + } + exts, l, err := parseExtensions(b[4:], l) + if err != nil { + l = len(b) - 4 + } + data := make([]byte, l) + copy(data, b[4:]) + return data, exts, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart_test.go b/vendor/golang.org/x/net/icmp/multipart_test.go new file mode 100644 index 000000000..966ccb8da --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart_test.go @@ -0,0 +1,442 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp_test + +import ( + "fmt" + "net" + "reflect" + "testing" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var marshalAndParseMultipartMessageForIPv4Tests = []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 2).To4(), + }, + }, + }, + }, + }, +} + +func TestMarshalAndParseMultipartMessageForIPv4(t *testing.T) { + for i, tt := range marshalAndParseMultipartMessageForIPv4Tests { + b, err := tt.Marshal(nil) + if err != nil { + t.Fatal(err) + } + if b[5] != 32 { + t.Errorf("#%v: got %v; want 32", i, b[5]) + } + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + switch m.Type { + case ipv4.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv4.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv4.ICMPTypeParameterProblem: + got, want := m.Body.(*icmp.ParamProb), tt.Body.(*icmp.ParamProb) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + } + } +} + +var marshalAndParseMultipartMessageForIPv6Tests = []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, 
+ Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en102", + }, + }, + }, + }, + }, +} + +func TestMarshalAndParseMultipartMessageForIPv6(t *testing.T) { + pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) + for i, tt := range marshalAndParseMultipartMessageForIPv6Tests { + for _, psh := range [][]byte{pshicmp, nil} { + b, err := tt.Marshal(psh) + if err != nil { + t.Fatal(err) + } + if b[4] != 16 { + t.Errorf("#%v: got %v; want 16", i, b[4]) + } + m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + switch m.Type { + case ipv6.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv6.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + } + } + } +} + +func dumpExtensions(i int, gotExts, wantExts []icmp.Extension) string { + var s string + for j, got := range gotExts { + switch got := got.(type) { + case *icmp.MPLSLabelStack: + want := wantExts[j].(*icmp.MPLSLabelStack) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%v/%v: got %#v; want %#v\n", i, j, got, want) + } + case *icmp.InterfaceInfo: + want := wantExts[j].(*icmp.InterfaceInfo) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%v/%v: got %#v, %#v, %#v; want %#v, %#v, %#v\n", i, j, got, got.Interface, got.Addr, want, want.Interface, want.Addr) + } + } + } + return s[:len(s)-1] +} + +var multipartMessageBodyLenTests = []struct { + proto int + in icmp.MessageBody + out int +}{ + { + iana.ProtocolICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + 
ipv4.HeaderLen, // [pointer, unused] and original datagram + }, + + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload, original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 132, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.PacketTooBig{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // mtu and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // pointer and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 127), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 136, // [length, unused], extension header, object header, object payload and original datagram + }, +} + +func TestMultipartMessageBodyLen(t *testing.T) { + for i, tt := range multipartMessageBodyLenTests { + if out := tt.in.Len(tt.proto); out != tt.out { + t.Errorf("#%d: got %d; want %d", i, out, tt.out) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/packettoobig.go b/vendor/golang.org/x/net/icmp/packettoobig.go new file mode 100644 index 000000000..a1c9df7bf --- /dev/null +++ b/vendor/golang.org/x/net/icmp/packettoobig.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A PacketTooBig represents an ICMP packet too big message body. +type PacketTooBig struct { + MTU int // maximum transmission unit of the nexthop link + Data []byte // data, known as original datagram field +} + +// Len implements the Len method of MessageBody interface. +func (p *PacketTooBig) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. 
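+// The wire form is the 4-byte MTU field followed by the original
+// datagram field.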
+func (p *PacketTooBig) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint32(b[:4], uint32(p.MTU)) + copy(b[4:], p.Data) + return b, nil +} + +// parsePacketTooBig parses b as an ICMP packet too big message body. +func parsePacketTooBig(proto int, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/paramprob.go b/vendor/golang.org/x/net/icmp/paramprob.go new file mode 100644 index 000000000..0a2548daa --- /dev/null +++ b/vendor/golang.org/x/net/icmp/paramprob.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "golang.org/x/net/internal/iana" +) + +// A ParamProb represents an ICMP parameter problem message body. +type ParamProb struct { + Pointer uintptr // offset within the data where the error was detected + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *ParamProb) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *ParamProb) Marshal(proto int) ([]byte, error) { + if proto == iana.ProtocolIPv6ICMP { + b := make([]byte, p.Len(proto)) + binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer)) + copy(b[4:], p.Data) + return b, nil + } + b, err := marshalMultipartMessageBody(proto, p.Data, p.Extensions) + if err != nil { + return nil, err + } + b[0] = byte(p.Pointer) + return b, nil +} + +// parseParamProb parses b as an ICMP parameter problem message body. +func parseParamProb(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &ParamProb{} + if proto == iana.ProtocolIPv6ICMP { + p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4])) + p.Data = make([]byte, len(b)-4) + copy(p.Data, b[4:]) + return p, nil + } + p.Pointer = uintptr(b[0]) + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/ping_test.go b/vendor/golang.org/x/net/icmp/ping_test.go new file mode 100644 index 000000000..3171dad11 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ping_test.go @@ -0,0 +1,200 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp_test + +import ( + "errors" + "fmt" + "net" + "os" + "runtime" + "sync" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +func googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) { + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + return nil, err + } + netaddr := func(ip net.IP) (net.Addr, error) { + switch c.LocalAddr().(type) { + case *net.UDPAddr: + return &net.UDPAddr{IP: ip}, nil + case *net.IPAddr: + return &net.IPAddr{IP: ip}, nil + default: + return nil, errors.New("neither UDPAddr nor IPAddr") + } + } + for _, ip := range ips { + switch protocol { + case iana.ProtocolICMP: + if ip.To4() != nil { + return netaddr(ip) + } + case iana.ProtocolIPv6ICMP: + if ip.To16() != nil && ip.To4() == nil { + return netaddr(ip) + } + } + } + return nil, errors.New("no A or AAAA record") +} + +type pingTest struct { + network, address string + protocol int + mtype icmp.Type +} + +var nonPrivilegedPingTests = []pingTest{ + {"udp4", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, + + {"udp6", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, +} + +func TestNonPrivilegedPing(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + for i, tt := range nonPrivilegedPingTests { + if err := doPing(tt, i); err != nil { + t.Error(err) + } + } +} + +var privilegedPingTests = []pingTest{ + {"ip4:icmp", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, + + {"ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, +} + +func TestPrivilegedPing(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + for i, tt := range privilegedPingTests { + if err := doPing(tt, i); err != nil { + t.Error(err) + } + } +} + +func doPing(tt pingTest, seq int) error { + c, err := icmp.ListenPacket(tt.network, tt.address) + if err != nil { + return err + } + defer c.Close() + + dst, err := googleAddr(c, tt.protocol) + if err != nil { + return err + } + + if tt.network != "udp6" && tt.protocol == iana.ProtocolIPv6ICMP { + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeDestinationUnreachable) + f.Accept(ipv6.ICMPTypePacketTooBig) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeParameterProblem) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := c.IPv6PacketConn().SetICMPFilter(&f); err != nil { + return err + } + } + + wm := icmp.Message{ + Type: tt.mtype, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: 1 << uint(seq), + Data: []byte("HELLO-R-U-THERE"), + }, + } + wb, err := wm.Marshal(nil) + if err != nil { + return err + } + if n, err := c.WriteTo(wb, dst); err != nil { + return err + } else if n != len(wb) { + return fmt.Errorf("got %v; want %v", n, len(wb)) + } + + rb := make([]byte, 1500) + if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + return err + } + n, peer, err := c.ReadFrom(rb) + if err != nil { + return err + } + rm, err := icmp.ParseMessage(tt.protocol, rb[:n]) + if err != nil { + return err + } + switch rm.Type { + case ipv4.ICMPTypeEchoReply, ipv6.ICMPTypeEchoReply: + return nil + default: + return fmt.Errorf("got %+v from %v; want 
echo reply", rm, peer) + } +} + +func TestConcurrentNonPrivilegedListenPacket(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + network, address := "udp4", "127.0.0.1" + if !nettest.SupportsIPv4() { + network, address = "udp6", "::1" + } + const N = 1000 + var wg sync.WaitGroup + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + defer wg.Done() + c, err := icmp.ListenPacket(network, address) + if err != nil { + t.Error(err) + return + } + c.Close() + }() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/icmp/sys_freebsd.go b/vendor/golang.org/x/net/icmp/sys_freebsd.go new file mode 100644 index 000000000..c75f3ddaa --- /dev/null +++ b/vendor/golang.org/x/net/icmp/sys_freebsd.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "syscall" + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") +} diff --git a/vendor/golang.org/x/net/icmp/timeexceeded.go b/vendor/golang.org/x/net/icmp/timeexceeded.go new file mode 100644 index 000000000..344e15848 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/timeexceeded.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A TimeExceeded represents an ICMP time exceeded message body. +type TimeExceeded struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *TimeExceeded) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *TimeExceeded) Marshal(proto int) ([]byte, error) { + return marshalMultipartMessageBody(proto, p.Data, p.Extensions) +} + +// parseTimeExceeded parses b as an ICMP time exceeded message body. +func parseTimeExceeded(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &TimeExceeded{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/idna/example_test.go b/vendor/golang.org/x/net/idna/example_test.go new file mode 100644 index 000000000..948f6eb20 --- /dev/null +++ b/vendor/golang.org/x/net/idna/example_test.go @@ -0,0 +1,70 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna_test + +import ( + "fmt" + + "golang.org/x/net/idna" +) + +func ExampleProfile() { + // Raw Punycode has no restrictions and does no mappings. + fmt.Println(idna.ToASCII("")) + fmt.Println(idna.ToASCII("*.faß.com")) + fmt.Println(idna.Punycode.ToASCII("*.faß.com")) + + // Rewrite IDN for lookup. This (currently) uses transitional mappings to + // find a balance between IDNA2003 and IDNA2008 compatibility. 
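+ // Under the transitional mapping "ß" becomes "ss", so the lookup
+ // form of "www.faß.com" below is the plain-ASCII "www.fass.com".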
+ fmt.Println(idna.Lookup.ToASCII("")) + fmt.Println(idna.Lookup.ToASCII("www.faß.com")) + + // Convert an IDN to ASCII for registration purposes. This changes the + // encoding, but reports an error if the input was illformed. + fmt.Println(idna.Registration.ToASCII("")) + fmt.Println(idna.Registration.ToASCII("www.faß.com")) + + // Output: + // + // *.xn--fa-hia.com + // *.xn--fa-hia.com + // + // www.fass.com + // idna: invalid label "" + // www.xn--fa-hia.com +} + +func ExampleNew() { + var p *idna.Profile + + // Raw Punycode has no restrictions and does no mappings. + p = idna.New() + fmt.Println(p.ToASCII("*.faß.com")) + + // Do mappings. Note that star is not allowed in a DNS lookup. + p = idna.New( + idna.MapForLookup(), + idna.Transitional(true)) // Map ß -> ss + fmt.Println(p.ToASCII("*.faß.com")) + + // Lookup for registration. Also does not allow '*'. + p = idna.New(idna.ValidateForRegistration()) + fmt.Println(p.ToUnicode("*.faß.com")) + + // Set up a profile maps for lookup, but allows wild cards. + p = idna.New( + idna.MapForLookup(), + idna.Transitional(true), // Map ß -> ss + idna.StrictDomainName(false)) // Set more permissive ASCII rules. + fmt.Println(p.ToASCII("*.faß.com")) + + // Output: + // *.xn--fa-hia.com + // *.fass.com idna: disallowed rune U+002A + // *.faß.com idna: disallowed rune U+002A + // *.fass.com +} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 000000000..346fe4423 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,732 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in http://www.unicode.org/reports/tr46. +// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. +package idna // import "golang.org/x/net/idna" + +import ( + "fmt" + "strings" + "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/bidi" + "golang.org/x/text/unicode/norm" +) + +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. + +// ToASCII is a wrapper for Punycode.ToASCII. 
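+//
+// A minimal usage sketch; the expected results follow the Profile.ToASCII
+// and Profile.ToUnicode documentation below:
+//
+//	a, _ := ToASCII("bücher.example.com")          // "xn--bcher-kva.example.com"
+//	u, _ := ToUnicode("xn--bcher-kva.example.com") // "bücher.example.com"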
+func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. +func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. +func Transitional(transitional bool) Option { + return func(o *options) { o.transitional = true } +} + +// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts +// are longer than allowed by the RFC. +func VerifyDNSLength(verify bool) Option { + return func(o *options) { o.verifyDNSLength = verify } +} + +// RemoveLeadingDots removes leading label separators. Leading runes that map to +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. +// +// This is the behavior suggested by the UTS #46 and is adopted by some +// browsers. +func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. +func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. + if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissible ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). See +// http://www.rfc-editor.org/std/std3.txt for more details This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. +func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. +func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. 
The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. +// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (mapped string, isBidi bool, err error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of an IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. +// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options. +func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. 
+ Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. + Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. +) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see http://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + var isBidi bool + if p.mapping != nil { + s, isBidi, err = p.mapping(p, s) + } + // Remove leading empty labels. + if p.removeLeadingDots { + for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + } + } + // TODO: allow for a quick check of the tables data. + // It seems like we should only create this error on ToASCII, but the + // UTS 46 conformance tests suggests we should always check this. + if err == nil && p.verifyDNSLength && s == "" { + err = &labelError{s, "A4"} + } + labels := labelIter{orig: s} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + // Empty labels are not okay. The label iterator skips the last + // label if it is empty. + if err == nil && p.verifyDNSLength { + err = &labelError{s, "A4"} + } + continue + } + if strings.HasPrefix(label, acePrefix) { + u, err2 := decode(label[len(acePrefix):]) + if err2 != nil { + if err == nil { + err = err2 + } + // Spec says keep the old label. + continue + } + isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight + labels.set(u) + if err == nil && p.validateLabels { + err = p.fromPuny(p, u) + } + if err == nil { + // This should be called on NonTransitional, according to the + // spec, but that currently does not have any effect. Use the + // original profile to preserve options. 
+ err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if isBidi && p.bidirule != nil && err == nil { + for labels.reset(); !labels.done(); labels.next() { + if !p.bidirule(labels.label()) { + err = &labelError{s, "B"} + break + } + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. + n := len(s) + if n > 0 && s[n-1] == '.' { + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) { + // TODO: consider first doing a quick check to see if any of these checks + // need to be done. This will make it slower in the general case, but + // faster in the common case. + mapped = norm.NFC.String(s) + isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft + return mapped, isBidi, nil +} + +func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) { + // TODO: filter need for normalization in loop below. + if !norm.NFC.IsNormalString(s) { + return s, false, &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return s, bidi, runeError(utf8.RuneError) + } + bidi = bidi || info(v).isBidi(s[i:]) + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, bidi, runeError(r) + } + i += sz + } + return s, bidi, nil +} + +func (c info) isBidi(s string) bool { + if !c.isMapped() { + return c&attributesMask == rtl + } + // TODO: also store bidi info for mapped data. This is possible, but a bit + // cumbersome and not for the common case. + p, _ := bidi.LookupString(s) + switch p.Class() { + case bidi.R, bidi.AL, bidi.AN: + return true + } + return false +} + +func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { + var ( + b []byte + k int + ) + // combinedInfoBits contains the or-ed bits of all runes. We use this + // to derive the mayNeedNorm bit later. This may trigger normalization + // overeagerly, but it will not do so in the common case. The end result + // is another 10% saving on BenchmarkProfile for the common case. + var combinedInfoBits info + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + b = append(b, s[k:i]...) + b = append(b, "\ufffd"...) + k = len(s) + if err == nil { + err = runeError(utf8.RuneError) + } + break + } + combinedInfoBits |= info(v) + bidi = bidi || info(v).isBidi(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) 
+ // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + if combinedInfoBits&mayNeedNorm != 0 { + s = norm.NFC.String(s) + } + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, bidi, err +} + +// A labelIter allows iterating over domain name labels. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + // TODO: detect whether string may have to be normalized in the following + // loop. 
+ for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return runeError(utf8.RuneError) + } + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. +func (p *Profile) validateLabel(s string) (err error) { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. + if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/idna_test.go b/vendor/golang.org/x/net/idna/idna_test.go new file mode 100644 index 000000000..0b067cac9 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna_test.go @@ -0,0 +1,108 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +import ( + "testing" +) + +var idnaTestCases = [...]struct { + ascii, unicode string +}{ + // Labels. + {"books", "books"}, + {"xn--bcher-kva", "bücher"}, + + // Domains. 
+ {"foo--xn--bar.org", "foo--xn--bar.org"}, + {"golang.org", "golang.org"}, + {"example.xn--p1ai", "example.рф"}, + {"xn--czrw28b.tw", "商業.tw"}, + {"www.xn--mller-kva.de", "www.müller.de"}, +} + +func TestIDNA(t *testing.T) { + for _, tc := range idnaTestCases { + if a, err := ToASCII(tc.unicode); err != nil { + t.Errorf("ToASCII(%q): %v", tc.unicode, err) + } else if a != tc.ascii { + t.Errorf("ToASCII(%q): got %q, want %q", tc.unicode, a, tc.ascii) + } + + if u, err := ToUnicode(tc.ascii); err != nil { + t.Errorf("ToUnicode(%q): %v", tc.ascii, err) + } else if u != tc.unicode { + t.Errorf("ToUnicode(%q): got %q, want %q", tc.ascii, u, tc.unicode) + } + } +} + +func TestIDNASeparators(t *testing.T) { + type subCase struct { + unicode string + wantASCII string + wantErr bool + } + + testCases := []struct { + name string + profile *Profile + subCases []subCase + }{ + { + name: "Punycode", profile: Punycode, + subCases: []subCase{ + {"example\u3002jp", "xn--examplejp-ck3h", false}, + {"東京\uFF0Ejp", "xn--jp-l92cn98g071o", false}, + {"大阪\uFF61jp", "xn--jp-ku9cz72u463f", false}, + }, + }, + { + name: "Lookup", profile: Lookup, + subCases: []subCase{ + {"example\u3002jp", "example.jp", false}, + {"東京\uFF0Ejp", "xn--1lqs71d.jp", false}, + {"大阪\uFF61jp", "xn--pssu33l.jp", false}, + }, + }, + { + name: "Display", profile: Display, + subCases: []subCase{ + {"example\u3002jp", "example.jp", false}, + {"東京\uFF0Ejp", "xn--1lqs71d.jp", false}, + {"大阪\uFF61jp", "xn--pssu33l.jp", false}, + }, + }, + { + name: "Registration", profile: Registration, + subCases: []subCase{ + {"example\u3002jp", "", true}, + {"東京\uFF0Ejp", "", true}, + {"大阪\uFF61jp", "", true}, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + for _, c := range tc.subCases { + gotA, err := tc.profile.ToASCII(c.unicode) + if c.wantErr { + if err == nil { + t.Errorf("ToASCII(%q): got no error, but an error expected", c.unicode) + } + } else { + if err != nil { + t.Errorf("ToASCII(%q): got err=%v, but no error expected", c.unicode, err) + } else if gotA != c.wantASCII { + t.Errorf("ToASCII(%q): got %q, want %q", c.unicode, gotA, c.wantASCII) + } + } + } + }) + } +} + +// TODO(nigeltao): test errors, once we've specified when ToASCII and ToUnicode +// return errors. diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 000000000..02c7d59af --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,203 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +func punyError(s string) error { return &labelError{s, "A3"} } + +// decode decodes a string as specified in section 6.2. 
+func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", punyError(encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", punyError(encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", punyError(encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", punyError(encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", punyError(encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", punyError(encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. +func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", punyError(s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", punyError(s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. 
+func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/idna/punycode_test.go b/vendor/golang.org/x/net/idna/punycode_test.go new file mode 100644 index 000000000..bfec81dec --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode_test.go @@ -0,0 +1,198 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +import ( + "strings" + "testing" +) + +var punycodeTestCases = [...]struct { + s, encoded string +}{ + {"", ""}, + {"-", "--"}, + {"-a", "-a-"}, + {"-a-", "-a--"}, + {"a", "a-"}, + {"a-", "a--"}, + {"a-b", "a-b-"}, + {"books", "books-"}, + {"bücher", "bcher-kva"}, + {"Hello世界", "Hello-ck1hg65u"}, + {"ü", "tda"}, + {"üý", "tdac"}, + + // The test cases below come from RFC 3492 section 7.1 with Errata 3026. + { + // (A) Arabic (Egyptian). + "\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" + + "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F", + "egbpdaj6bu4bxfgehfvwxn", + }, + { + // (B) Chinese (simplified). + "\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587", + "ihqwcrb4cv8a8dqg056pqjye", + }, + { + // (C) Chinese (traditional). + "\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587", + "ihqwctvzc91f659drss3x8bo0yb", + }, + { + // (D) Czech. + "\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" + + "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" + + "\u0065\u0073\u006B\u0079", + "Proprostnemluvesky-uyb24dma41a", + }, + { + // (E) Hebrew. + "\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" + + "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" + + "\u05D1\u05E8\u05D9\u05EA", + "4dbcagdahymbxekheh6e0a7fei0b", + }, + { + // (F) Hindi (Devanagari). + "\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" + + "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" + + "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" + + "\u0939\u0948\u0902", + "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd", + }, + { + // (G) Japanese (kanji and hiragana). + "\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" + + "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B", + "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa", + }, + { + // (H) Korean (Hangul syllables). + "\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" + + "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" + + "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C", + "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" + + "psd879ccm6fea98c", + }, + { + // (I) Russian (Cyrillic). + "\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" + + "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" + + "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" + + "\u0438", + "b1abfaaepdrnnbgefbadotcwatmq2g4l", + }, + { + // (J) Spanish. + "\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" + + "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" + + "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" + + "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" + + "\u0061\u00F1\u006F\u006C", + "PorqunopuedensimplementehablarenEspaol-fmd56a", + }, + { + // (K) Vietnamese. 
+ "\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" + + "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" + + "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" + + "\u0056\u0069\u1EC7\u0074", + "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g", + }, + { + // (L) 3B. + "\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F", + "3B-ww4c5e180e575a65lsy2b", + }, + { + // (M) -with-SUPER-MONKEYS. + "\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" + + "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" + + "\u004F\u004E\u004B\u0045\u0059\u0053", + "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n", + }, + { + // (N) Hello-Another-Way-. + "\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" + + "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" + + "\u305D\u308C\u305E\u308C\u306E\u5834\u6240", + "Hello-Another-Way--fc4qua05auwb3674vfr0b", + }, + { + // (O) 2. + "\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032", + "2-u9tlzr9756bt3uc0v", + }, + { + // (P) MajiKoi5 + "\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" + + "\u308B\u0035\u79D2\u524D", + "MajiKoi5-783gue6qz075azm5e", + }, + { + // (Q) de + "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", + "de-jg4avhby1noc0d", + }, + { + // (R) + "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", + "d9juau41awczczp", + }, + { + // (S) -> $1.00 <- + "\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" + + "\u003C\u002D", + "-> $1.00 <--", + }, +} + +func TestPunycode(t *testing.T) { + for _, tc := range punycodeTestCases { + if got, err := decode(tc.encoded); err != nil { + t.Errorf("decode(%q): %v", tc.encoded, err) + } else if got != tc.s { + t.Errorf("decode(%q): got %q, want %q", tc.encoded, got, tc.s) + } + + if got, err := encode("", tc.s); err != nil { + t.Errorf(`encode("", %q): %v`, tc.s, err) + } else if got != tc.encoded { + t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded) + } + } +} + +var punycodeErrorTestCases = [...]string{ + "decode -", // A sole '-' is invalid. + "decode foo\x00bar", // '\x00' is not in [0-9A-Za-z]. + "decode foo#bar", // '#' is not in [0-9A-Za-z]. + "decode foo\u00A3bar", // '\u00A3' is not in [0-9A-Za-z]. + "decode 9", // "9a" decodes to codepoint \u00A3; "9" is truncated. + "decode 99999a", // "99999a" decodes to codepoint \U0048A3C1, which is > \U0010FFFF. + "decode 9999999999a", // "9999999999a" overflows the int32 calculation. + + "encode " + strings.Repeat("x", 65536) + "\uff00", // int32 overflow. +} + +func TestPunycodeErrors(t *testing.T) { + for _, tc := range punycodeErrorTestCases { + var err error + switch { + case strings.HasPrefix(tc, "decode "): + _, err = decode(tc[7:]) + case strings.HasPrefix(tc, "encode "): + _, err = encode("", tc[7:]) + } + if err == nil { + if len(tc) > 256 { + tc = tc[:100] + "..." + tc[len(tc)-100:] + } + t.Errorf("no error for %s", tc) + } + } +} diff --git a/vendor/golang.org/x/net/idna/tables.go b/vendor/golang.org/x/net/idna/tables.go new file mode 100644 index 000000000..f910b2691 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables.go @@ -0,0 +1,4557 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "10.0.0" + +var mappings string = "" + // Size: 8176 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. 
+var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 
0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e, + 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137, + 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 
0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160, + 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 
0xba, + // Block 0x18, offset 0x600 + 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e, + 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172, + 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 
0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 0x0b, 
0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, +} + +// idnaSparseOffset: 264 entries, 528 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 
0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778} + +// idnaSparseValues: 1915 entries, 7660 bytes +var idnaSparseValues = [1915]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, 
lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8a + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, 
offset 0x93 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa3 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xc9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xda + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // 
Block 0x17, offset 0xeb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x109 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x110 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11b + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x138 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 
0x142 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x144 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x149 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14c + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x14f + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x151 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x168 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x170 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x176 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x181 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x186 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x193 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 
0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x198 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a4 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1ae + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b4 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c5 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1cf + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x34, offset 0x1d2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1da + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1dd + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1ea + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 
0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f6 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fd + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x205 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x215 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x221 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x223 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22d + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x239 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + 
{value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x245 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x251 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x259 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25e + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x268 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x279 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x28c + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x295 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + 
{value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x29d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2a8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2ab + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2af + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b5 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2b9 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2bd + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x2c3 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x54, offset 0x2ca + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2d0 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2d8 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2df + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, 
+ {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5a, offset 0x2f8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0x5b, offset 0x2fb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5c, offset 0x301 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5d, offset 0x305 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5e, offset 0x307 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x5f, offset 0x30a + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x30c + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x61, offset 0x30f + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x62, offset 0x319 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x31c + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x65, offset 0x32f + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x66, offset 0x334 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x67, offset 0x337 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x68, offset 0x33b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 
0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x69, offset 0x340 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6a, offset 0x345 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x34b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x351 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x360 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6e, offset 0x366 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x36a + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x70, offset 0x379 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x71, offset 0x37e + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x72, offset 0x386 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 
0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x39b + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3a3 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3b4 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3bd + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x78, offset 0x3cd + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3da + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3e4 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3e9 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 
0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fa + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7e, offset 0x3ff + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x401 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x405 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x407 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x40b + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x414 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x41a + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x41e + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x42e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x438 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x43d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, 
hi: 0xbf}, + // Block 0x89, offset 0x440 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x446 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x44d + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x452 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x456 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x45c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x461 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x46a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x46f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x475 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x47c + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x483 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x48a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x48e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 
0x493 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x496 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x49b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a7 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4b2 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b9 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c6 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4ca + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4da + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + 
{value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa4, offset 0x4e9 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4f0 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4f2 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x4f5 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x4f8 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4fc + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xaa, offset 0x500 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x506 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x50f + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x51b + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xae, offset 0x522 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xaf, offset 0x52b + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x533 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x53a + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, 
+ {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb2, offset 0x548 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x555 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb4, offset 0x562 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb5, offset 0x56b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x56f + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x585 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xb9, offset 0x590 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + 
{value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x599 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbb, offset 0x59f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5a7 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbd, offset 0x5b0 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xbe, offset 0x5ba + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xbf, offset 0x5bd + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc0, offset 0x5c9 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc1, offset 0x5cc + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5d1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc4, offset 0x5e7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 
0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc5, offset 0x5f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc6, offset 0x5f6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x600 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc8, offset 0x609 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc9, offset 0x615 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xca, offset 0x622 + {value: 0x0000, lo: 0x07}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcb, offset 0x62a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcc, offset 0x62d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xcd, offset 0x632 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xce, offset 0x635 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcf, offset 0x638 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd0, offset 0x63b + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 
0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd1, offset 0x642 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd2, offset 0x649 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd3, offset 0x64d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd4, offset 0x658 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd5, offset 0x65b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x661 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd7, offset 0x666 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xd8, offset 0x66a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd9, offset 0x66d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xda, offset 0x670 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xdb, offset 0x673 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xdc, offset 0x676 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xdd, offset 0x679 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xde, offset 0x67e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xdf, offset 0x688 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe0, offset 0x68b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe1, offset 0x68f + {value: 0x0000, lo: 0x0e}, 
+ {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x69e + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xe3, offset 0x6aa + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xe4, offset 0x6ae + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe5, offset 0x6b3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe7, offset 0x6bc + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6c1 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6ca + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xea, offset 0x6d5 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xeb, offset 0x6db + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xec, offset 0x6e3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, 
lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xed, offset 0x6e7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xee, offset 0x6eb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xef, offset 0x6f1 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xf0, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xf1, offset 0x6fc + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xf2, offset 0x6ff + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xf3, offset 0x70f + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf4, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf5, offset 0x719 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf6, offset 0x71c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf7, offset 0x720 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf8, offset 0x726 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf9, offset 0x72b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfa, offset 0x730 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x735 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xbf}, + // Block 0xfc, offset 0x738 + 
{value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0xfd, offset 0x73d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xfe, offset 0x740 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xff, offset 0x743 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x100, offset 0x747 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x101, offset 0x74b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x102, offset 0x74e + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0x103, offset 0x75e + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x104, offset 0x76f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x105, offset 0x774 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x106, offset 0x776 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x107, offset 0x778 + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E diff --git a/vendor/golang.org/x/net/idna/trie.go b/vendor/golang.org/x/net/idna/trie.go new file mode 100644 index 000000000..c4ef847e7 --- /dev/null +++ b/vendor/golang.org/x/net/idna/trie.go @@ -0,0 +1,72 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. 
+func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} + +// Sparse block handling code. + +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var idnaSparse = sparseBlocks{ + values: idnaSparseValues[:], + offset: idnaSparseOffset[:], +} + +// Don't use newIdnaTrie to avoid unconditional linking in of the table. +var trie = &idnaTrie{} + +// lookup determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is by r.value + (b - r.lo) * stride. +func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go new file mode 100644 index 000000000..7a8cf889b --- /dev/null +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -0,0 +1,119 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// This file contains definitions for interpreting the trie value of the idna +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds information from the IDNA mapping table for a single rune. It is +// the value returned by a trie lookup. In most cases, all information fits in +// a 16-bit value. For mappings, this value may contain an index into a slice +// with the mapped string. Such mappings can consist of the actual mapped value +// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the +// input rune. This technique is used by the cases packages and reduces the +// table size significantly. +// +// The per-rune values have the following format: +// +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + catSmallMask = 0x3 + catBigMask = 0xF8 + indexShift = 3 + xorBit = 0x4 // interpret the index as an xor pattern + inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. 
+ + joinShift = 8 + joinMask = 0x07 + + // Attributes + attributesMask = 0x1800 + viramaModifier = 0x1800 + modifier = 0x1000 + rtl = 0x0800 + + mayNeedNorm = 0x2000 +) + +// A category corresponds to a category defined in the IDNA mapping table. +type category uint16 + +const ( + unknown category = 0 // not currently defined in unicode. + mapped category = 1 + disallowedSTD3Mapped category = 2 + deviation category = 3 +) + +const ( + valid category = 0x08 + validNV8 category = 0x18 + validXV8 category = 0x28 + disallowed category = 0x40 + disallowedSTD3Valid category = 0x80 + ignored category = 0xC0 +) + +// join types and additional rune information +const ( + joiningL = (iota + 1) + joiningD + joiningT + joiningR + + //the following types are derived during processing + joinZWJ + joinZWNJ + joinVirama + numJoinTypes +) + +func (c info) isMapped() bool { + return c&0x3 != 0 +} + +func (c info) category() category { + small := c & catSmallMask + if small != 0 { + return category(small) + } + return category(c & catBigMask) +} + +func (c info) joinType() info { + if c.isMapped() { + return 0 + } + return (c >> joinShift) & joinMask +} + +func (c info) isModifier() bool { + return c&(modifier|catSmallMask) == modifier +} + +func (c info) isViramaModifier() bool { + return c&(attributesMask|catSmallMask) == viramaModifier +} diff --git a/vendor/golang.org/x/net/internal/iana/const.go b/vendor/golang.org/x/net/internal/iana/const.go new file mode 100644 index 000000000..c9df24d95 --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/const.go @@ -0,0 +1,180 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). +package iana // import "golang.org/x/net/internal/iana" + +// Differentiated Services Field Codepoints (DSCP), Updated: 2017-05-12 +const ( + DiffServCS0 = 0x0 // CS0 + DiffServCS1 = 0x20 // CS1 + DiffServCS2 = 0x40 // CS2 + DiffServCS3 = 0x60 // CS3 + DiffServCS4 = 0x80 // CS4 + DiffServCS5 = 0xa0 // CS5 + DiffServCS6 = 0xc0 // CS6 + DiffServCS7 = 0xe0 // CS7 + DiffServAF11 = 0x28 // AF11 + DiffServAF12 = 0x30 // AF12 + DiffServAF13 = 0x38 // AF13 + DiffServAF21 = 0x48 // AF21 + DiffServAF22 = 0x50 // AF22 + DiffServAF23 = 0x58 // AF23 + DiffServAF31 = 0x68 // AF31 + DiffServAF32 = 0x70 // AF32 + DiffServAF33 = 0x78 // AF33 + DiffServAF41 = 0x88 // AF41 + DiffServAF42 = 0x90 // AF42 + DiffServAF43 = 0x98 // AF43 + DiffServEF = 0xb8 // EF + DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT +) + +// IPv4 TOS Byte and IPv6 Traffic Class Octet, Updated: 2001-09-06 +const ( + NotECNTransport = 0x0 // Not-ECT (Not ECN-Capable Transport) + ECNTransport1 = 0x1 // ECT(1) (ECN-Capable Transport(1)) + ECNTransport0 = 0x2 // ECT(0) (ECN-Capable Transport(0)) + CongestionExperienced = 0x3 // CE (Congestion Experienced) +) + +// Protocol Numbers, Updated: 2016-06-22 +const ( + ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number + ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option + ProtocolICMP = 1 // Internet Control Message + ProtocolIGMP = 2 // Internet Group Management + ProtocolGGP = 3 // Gateway-to-Gateway + ProtocolIPv4 = 4 // IPv4 encapsulation + ProtocolST = 5 // Stream + ProtocolTCP = 6 // Transmission Control + ProtocolCBT = 7 // CBT + ProtocolEGP = 8 // Exterior Gateway Protocol + ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP) + ProtocolBBNRCCMON = 10 // BBN RCC Monitoring + ProtocolNVPII = 11 // Network Voice Protocol + 
ProtocolPUP = 12 // PUP + ProtocolEMCON = 14 // EMCON + ProtocolXNET = 15 // Cross Net Debugger + ProtocolCHAOS = 16 // Chaos + ProtocolUDP = 17 // User Datagram + ProtocolMUX = 18 // Multiplexing + ProtocolDCNMEAS = 19 // DCN Measurement Subsystems + ProtocolHMP = 20 // Host Monitoring + ProtocolPRM = 21 // Packet Radio Measurement + ProtocolXNSIDP = 22 // XEROX NS IDP + ProtocolTRUNK1 = 23 // Trunk-1 + ProtocolTRUNK2 = 24 // Trunk-2 + ProtocolLEAF1 = 25 // Leaf-1 + ProtocolLEAF2 = 26 // Leaf-2 + ProtocolRDP = 27 // Reliable Data Protocol + ProtocolIRTP = 28 // Internet Reliable Transaction + ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4 + ProtocolNETBLT = 30 // Bulk Data Transfer Protocol + ProtocolMFENSP = 31 // MFE Network Services Protocol + ProtocolMERITINP = 32 // MERIT Internodal Protocol + ProtocolDCCP = 33 // Datagram Congestion Control Protocol + Protocol3PC = 34 // Third Party Connect Protocol + ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol + ProtocolXTP = 36 // XTP + ProtocolDDP = 37 // Datagram Delivery Protocol + ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto + ProtocolTPPP = 39 // TP++ Transport Protocol + ProtocolIL = 40 // IL Transport Protocol + ProtocolIPv6 = 41 // IPv6 encapsulation + ProtocolSDRP = 42 // Source Demand Routing Protocol + ProtocolIPv6Route = 43 // Routing Header for IPv6 + ProtocolIPv6Frag = 44 // Fragment Header for IPv6 + ProtocolIDRP = 45 // Inter-Domain Routing Protocol + ProtocolRSVP = 46 // Reservation Protocol + ProtocolGRE = 47 // Generic Routing Encapsulation + ProtocolDSR = 48 // Dynamic Source Routing Protocol + ProtocolBNA = 49 // BNA + ProtocolESP = 50 // Encap Security Payload + ProtocolAH = 51 // Authentication Header + ProtocolINLSP = 52 // Integrated Net Layer Security TUBA + ProtocolNARP = 54 // NBMA Address Resolution Protocol + ProtocolMOBILE = 55 // IP Mobility + ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management + ProtocolSKIP = 57 // SKIP + ProtocolIPv6ICMP = 58 // ICMP for IPv6 + ProtocolIPv6NoNxt = 59 // No Next Header for IPv6 + ProtocolIPv6Opts = 60 // Destination Options for IPv6 + ProtocolCFTP = 62 // CFTP + ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK + ProtocolKRYPTOLAN = 65 // Kryptolan + ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol + ProtocolIPPC = 67 // Internet Pluribus Packet Core + ProtocolSATMON = 69 // SATNET Monitoring + ProtocolVISA = 70 // VISA Protocol + ProtocolIPCV = 71 // Internet Packet Core Utility + ProtocolCPNX = 72 // Computer Protocol Network Executive + ProtocolCPHB = 73 // Computer Protocol Heart Beat + ProtocolWSN = 74 // Wang Span Network + ProtocolPVP = 75 // Packet Video Protocol + ProtocolBRSATMON = 76 // Backroom SATNET Monitoring + ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary + ProtocolWBMON = 78 // WIDEBAND Monitoring + ProtocolWBEXPAK = 79 // WIDEBAND EXPAK + ProtocolISOIP = 80 // ISO Internet Protocol + ProtocolVMTP = 81 // VMTP + ProtocolSECUREVMTP = 82 // SECURE-VMTP + ProtocolVINES = 83 // VINES + ProtocolTTP = 84 // Transaction Transport Protocol + ProtocolIPTM = 84 // Internet Protocol Traffic Manager + ProtocolNSFNETIGP = 85 // NSFNET-IGP + ProtocolDGP = 86 // Dissimilar Gateway Protocol + ProtocolTCF = 87 // TCF + ProtocolEIGRP = 88 // EIGRP + ProtocolOSPFIGP = 89 // OSPFIGP + ProtocolSpriteRPC = 90 // Sprite RPC Protocol + ProtocolLARP = 91 // Locus Address Resolution Protocol + ProtocolMTP = 92 // Multicast Transport Protocol + ProtocolAX25 = 93 // AX.25 Frames + ProtocolIPIP = 94 // IP-within-IP 
Encapsulation Protocol + ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro. + ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation + ProtocolENCAP = 98 // Encapsulation Header + ProtocolGMTP = 100 // GMTP + ProtocolIFMP = 101 // Ipsilon Flow Management Protocol + ProtocolPNNI = 102 // PNNI over IP + ProtocolPIM = 103 // Protocol Independent Multicast + ProtocolARIS = 104 // ARIS + ProtocolSCPS = 105 // SCPS + ProtocolQNX = 106 // QNX + ProtocolAN = 107 // Active Networks + ProtocolIPComp = 108 // IP Payload Compression Protocol + ProtocolSNP = 109 // Sitara Networks Protocol + ProtocolCompaqPeer = 110 // Compaq Peer Protocol + ProtocolIPXinIP = 111 // IPX in IP + ProtocolVRRP = 112 // Virtual Router Redundancy Protocol + ProtocolPGM = 113 // PGM Reliable Transport Protocol + ProtocolL2TP = 115 // Layer Two Tunneling Protocol + ProtocolDDX = 116 // D-II Data Exchange (DDX) + ProtocolIATP = 117 // Interactive Agent Transfer Protocol + ProtocolSTP = 118 // Schedule Transfer Protocol + ProtocolSRP = 119 // SpectraLink Radio Protocol + ProtocolUTI = 120 // UTI + ProtocolSMP = 121 // Simple Message Protocol + ProtocolPTP = 123 // Performance Transparency Protocol + ProtocolISIS = 124 // ISIS over IPv4 + ProtocolFIRE = 125 // FIRE + ProtocolCRTP = 126 // Combat Radio Transport Protocol + ProtocolCRUDP = 127 // Combat Radio User Datagram + ProtocolSSCOPMCE = 128 // SSCOPMCE + ProtocolIPLT = 129 // IPLT + ProtocolSPS = 130 // Secure Packet Shield + ProtocolPIPE = 131 // Private IP Encapsulation within IP + ProtocolSCTP = 132 // Stream Control Transmission Protocol + ProtocolFC = 133 // Fibre Channel + ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE + ProtocolMobilityHeader = 135 // Mobility Header + ProtocolUDPLite = 136 // UDPLite + ProtocolMPLSinIP = 137 // MPLS-in-IP + ProtocolMANET = 138 // MANET Protocols + ProtocolHIP = 139 // Host Identity Protocol + ProtocolShim6 = 140 // Shim6 Protocol + ProtocolWESP = 141 // Wrapped Encapsulating Security Payload + ProtocolROHC = 142 // Robust Header Compression + ProtocolReserved = 255 // Reserved +) diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go new file mode 100644 index 000000000..2a5c310c2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/gen.go @@ -0,0 +1,293 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates internet protocol constants and tables by +// reading IANA protocol registries. 
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" +) + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/dscp-registry/dscp-registry.xml", + parseDSCPRegistry, + }, + { + "https://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml", + parseTOSTCByte, + }, + { + "https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", + parseProtocolNumbers, + }, +} + +func main() { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") + fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url) + os.Exit(1) + } + if err := r.parse(&bb, resp.Body); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := ioutil.WriteFile("const.go", b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func parseDSCPRegistry(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var dr dscpRegistry + if err := dec.Decode(&dr); err != nil { + return err + } + drs := dr.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated) + fmt.Fprintf(w, "const (\n") + for _, dr := range drs { + fmt.Fprintf(w, "DiffServ%s = %#x", dr.Name, dr.Value) + fmt.Fprintf(w, "// %s\n", dr.OrigName) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type dscpRegistry struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` + RegTitle string `xml:"registry>title"` + PoolRecords []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>record"` + Records []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>registry>record"` +} + +type canonDSCPRecord struct { + OrigName string + Name string + Value int +} + +func (drr *dscpRegistry) escape() []canonDSCPRecord { + drs := make([]canonDSCPRecord, len(drr.Records)) + sr := strings.NewReplacer( + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, dr := range drr.Records { + s := strings.TrimSpace(dr.Name) + drs[i].OrigName = s + drs[i].Name = sr.Replace(s) + n, err := strconv.ParseUint(dr.Space, 2, 8) + if err != nil { + continue + } + drs[i].Value = int(n) << 2 + } + return drs +} + +func parseTOSTCByte(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var ttb tosTCByte + if err := dec.Decode(&ttb); err != nil { + return err + } + trs := ttb.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", ttb.Title, ttb.Updated) + fmt.Fprintf(w, "const (\n") + for _, tr := range trs { + fmt.Fprintf(w, "%s = %#x", tr.Keyword, tr.Value) + fmt.Fprintf(w, "// %s\n", tr.OrigKeyword) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type tosTCByte struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string 
`xml:"note"` + RegTitle string `xml:"registry>title"` + Records []struct { + Binary string `xml:"binary"` + Keyword string `xml:"keyword"` + } `xml:"registry>record"` +} + +type canonTOSTCByteRecord struct { + OrigKeyword string + Keyword string + Value int +} + +func (ttb *tosTCByte) escape() []canonTOSTCByteRecord { + trs := make([]canonTOSTCByteRecord, len(ttb.Records)) + sr := strings.NewReplacer( + "Capable", "", + "(", "", + ")", "", + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, tr := range ttb.Records { + s := strings.TrimSpace(tr.Keyword) + trs[i].OrigKeyword = s + ss := strings.Split(s, " ") + if len(ss) > 1 { + trs[i].Keyword = strings.Join(ss[1:], " ") + } else { + trs[i].Keyword = ss[0] + } + trs[i].Keyword = sr.Replace(trs[i].Keyword) + n, err := strconv.ParseUint(tr.Binary, 2, 8) + if err != nil { + continue + } + trs[i].Value = int(n) + } + return trs +} + +func parseProtocolNumbers(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var pn protocolNumbers + if err := dec.Decode(&pn); err != nil { + return err + } + prs := pn.escape() + prs = append([]canonProtocolRecord{{ + Name: "IP", + Descr: "IPv4 encapsulation, pseudo protocol number", + Value: 0, + }}, prs...) + fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value) + s := pr.Descr + if s == "" { + s = pr.OrigName + } + fmt.Fprintf(w, "// %s\n", s) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type protocolNumbers struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + RegTitle string `xml:"registry>title"` + Note string `xml:"registry>note"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + Descr string `xml:"description"` + } `xml:"registry>record"` +} + +type canonProtocolRecord struct { + OrigName string + Name string + Descr string + Value int +} + +func (pn *protocolNumbers) escape() []canonProtocolRecord { + prs := make([]canonProtocolRecord, len(pn.Records)) + sr := strings.NewReplacer( + "-in-", "in", + "-within-", "within", + "-over-", "over", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range pn.Records { + if strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "deprecated") { + continue + } + prs[i].OrigName = pr.Name + s := strings.TrimSpace(pr.Name) + switch pr.Name { + case "ISIS over IPv4": + prs[i].Name = "ISIS" + case "manet": + prs[i].Name = "MANET" + default: + prs[i].Name = sr.Replace(s) + } + ss := strings.Split(pr.Descr, "\n") + for i := range ss { + ss[i] = strings.TrimSpace(ss[i]) + } + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_bsd.go b/vendor/golang.org/x/net/internal/nettest/helper_bsd.go new file mode 100644 index 000000000..a6e433b58 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_bsd.go @@ -0,0 +1,53 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package nettest + +import ( + "runtime" + "strconv" + "strings" + "syscall" +) + +var darwinVersion int + +func init() { + if runtime.GOOS == "darwin" { + // See http://support.apple.com/kb/HT1633. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + darwinVersion, _ = strconv.Atoi(ss[0]) + } +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + switch runtime.GOOS { + case "freebsd": + // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065. + // Even after the fix, it looks like the latest + // kernels don't deliver link-local scoped multicast + // packets correctly. + return false + case "darwin": + return !causesIPv6Crash() + default: + return true + } +} + +func causesIPv6Crash() bool { + // We see some kernel crash when running IPv6 with IP-level + // options on Darwin kernel version 12 or below. + // See golang.org/issues/17015. + return darwinVersion < 13 +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go b/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go new file mode 100644 index 000000000..bc7da5e0d --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux solaris + +package nettest + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return true +} + +func causesIPv6Crash() bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_posix.go b/vendor/golang.org/x/net/internal/nettest/helper_posix.go new file mode 100644 index 000000000..963ed9965 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_posix.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package nettest + +import ( + "os" + "syscall" +) + +func protocolNotSupported(err error) bool { + switch err := err.(type) { + case syscall.Errno: + switch err { + case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: + return true + } + case *os.SyscallError: + switch err := err.Err.(type) { + case syscall.Errno: + switch err { + case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: + return true + } + } + } + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_stub.go b/vendor/golang.org/x/net/internal/nettest/helper_stub.go new file mode 100644 index 000000000..ea61b6f39 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_stub.go @@ -0,0 +1,32 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 + +package nettest + +import ( + "fmt" + "runtime" +) + +func maxOpenFiles() int { + return defaultMaxOpenFiles +} + +func supportsRawIPSocket() (string, bool) { + return fmt.Sprintf("not supported on %s", runtime.GOOS), false +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return false +} + +func causesIPv6Crash() bool { + return false +} + +func protocolNotSupported(err error) bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_unix.go b/vendor/golang.org/x/net/internal/nettest/helper_unix.go new file mode 100644 index 000000000..ed13e448b --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_unix.go @@ -0,0 +1,29 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package nettest + +import ( + "fmt" + "os" + "runtime" + "syscall" +) + +func maxOpenFiles() int { + var rlim syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil { + return defaultMaxOpenFiles + } + return int(rlim.Cur) +} + +func supportsRawIPSocket() (string, bool) { + if os.Getuid() != 0 { + return fmt.Sprintf("must be root on %s", runtime.GOOS), false + } + return "", true +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_windows.go b/vendor/golang.org/x/net/internal/nettest/helper_windows.go new file mode 100644 index 000000000..3dcb727c9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_windows.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import ( + "fmt" + "runtime" + "syscall" +) + +func maxOpenFiles() int { + return 4 * defaultMaxOpenFiles /* actually it's 16581375 */ +} + +func supportsRawIPSocket() (string, bool) { + // From http://msdn.microsoft.com/en-us/library/windows/desktop/ms740548.aspx: + // Note: To use a socket of type SOCK_RAW requires administrative privileges. + // Users running Winsock applications that use raw sockets must be a member of + // the Administrators group on the local computer, otherwise raw socket calls + // will fail with an error code of WSAEACCES. On Windows Vista and later, access + // for raw sockets is enforced at socket creation. In earlier versions of Windows, + // access for raw sockets is enforced during other socket operations. + s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, 0) + if err == syscall.WSAEACCES { + return fmt.Sprintf("no access to raw socket allowed on %s", runtime.GOOS), false + } + if err != nil { + return err.Error(), false + } + syscall.Closesocket(s) + return "", true +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return true +} + +func causesIPv6Crash() bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/interface.go b/vendor/golang.org/x/net/internal/nettest/interface.go new file mode 100644 index 000000000..8e6333afe --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/interface.go @@ -0,0 +1,94 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import "net" + +// IsMulticastCapable reports whether ifi is an IP multicast-capable +// network interface. 
Network must be "ip", "ip4" or "ip6". +func IsMulticastCapable(network string, ifi *net.Interface) (net.IP, bool) { + switch network { + case "ip", "ip4", "ip6": + default: + return nil, false + } + if ifi == nil || ifi.Flags&net.FlagUp == 0 || ifi.Flags&net.FlagMulticast == 0 { + return nil, false + } + return hasRoutableIP(network, ifi) +} + +// RoutedInterface returns a network interface that can route IP +// traffic and satisfies flags. It returns nil when an appropriate +// network interface is not found. Network must be "ip", "ip4" or +// "ip6". +func RoutedInterface(network string, flags net.Flags) *net.Interface { + switch network { + case "ip", "ip4", "ip6": + default: + return nil + } + ift, err := net.Interfaces() + if err != nil { + return nil + } + for _, ifi := range ift { + if ifi.Flags&flags != flags { + continue + } + if _, ok := hasRoutableIP(network, &ifi); !ok { + continue + } + return &ifi + } + return nil +} + +func hasRoutableIP(network string, ifi *net.Interface) (net.IP, bool) { + ifat, err := ifi.Addrs() + if err != nil { + return nil, false + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := routableIP(network, ifa.IP); ip != nil { + return ip, true + } + case *net.IPNet: + if ip := routableIP(network, ifa.IP); ip != nil { + return ip, true + } + } + } + return nil, false +} + +func routableIP(network string, ip net.IP) net.IP { + if !ip.IsLoopback() && !ip.IsLinkLocalUnicast() && !ip.IsGlobalUnicast() { + return nil + } + switch network { + case "ip4": + if ip := ip.To4(); ip != nil { + return ip + } + case "ip6": + if ip.IsLoopback() { // addressing scope of the loopback address depends on each implementation + return nil + } + if ip := ip.To16(); ip != nil && ip.To4() == nil { + return ip + } + default: + if ip := ip.To4(); ip != nil { + return ip + } + if ip := ip.To16(); ip != nil { + return ip + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit.go b/vendor/golang.org/x/net/internal/nettest/rlimit.go new file mode 100644 index 000000000..bb34aec0b --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/rlimit.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +const defaultMaxOpenFiles = 256 + +// MaxOpenFiles returns the maximum number of open files for the +// caller's process. +func MaxOpenFiles() int { return maxOpenFiles() } diff --git a/vendor/golang.org/x/net/internal/nettest/stack.go b/vendor/golang.org/x/net/internal/nettest/stack.go new file mode 100644 index 000000000..06f4e09ef --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/stack.go @@ -0,0 +1,152 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nettest provides utilities for network testing. +package nettest // import "golang.org/x/net/internal/nettest" + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "runtime" +) + +var ( + supportsIPv4 bool + supportsIPv6 bool +) + +func init() { + if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { + ln.Close() + supportsIPv4 = true + } + if ln, err := net.Listen("tcp6", "[::1]:0"); err == nil { + ln.Close() + supportsIPv6 = true + } +} + +// SupportsIPv4 reports whether the platform supports IPv4 networking +// functionality. 
+func SupportsIPv4() bool { return supportsIPv4 } + +// SupportsIPv6 reports whether the platform supports IPv6 networking +// functionality. +func SupportsIPv6() bool { return supportsIPv6 } + +// SupportsRawIPSocket reports whether the platform supports raw IP +// sockets. +func SupportsRawIPSocket() (string, bool) { + return supportsRawIPSocket() +} + +// SupportsIPv6MulticastDeliveryOnLoopback reports whether the +// platform supports IPv6 multicast packet delivery on software +// loopback interface. +func SupportsIPv6MulticastDeliveryOnLoopback() bool { + return supportsIPv6MulticastDeliveryOnLoopback() +} + +// ProtocolNotSupported reports whether err is a protocol not +// supported error. +func ProtocolNotSupported(err error) bool { + return protocolNotSupported(err) +} + +// TestableNetwork reports whether network is testable on the current +// platform configuration. +func TestableNetwork(network string) bool { + // This is based on logic from standard library's + // net/platform_test.go. + switch network { + case "unix", "unixgram": + switch runtime.GOOS { + case "android", "nacl", "plan9", "windows": + return false + } + if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") { + return false + } + case "unixpacket": + switch runtime.GOOS { + case "android", "darwin", "freebsd", "nacl", "plan9", "windows": + return false + case "netbsd": + // It passes on amd64 at least. 386 fails (Issue 22927). arm is unknown. + if runtime.GOARCH == "386" { + return false + } + } + } + return true +} + +// NewLocalListener returns a listener which listens to a loopback IP +// address or local file system path. +// Network must be "tcp", "tcp4", "tcp6", "unix" or "unixpacket". +func NewLocalListener(network string) (net.Listener, error) { + switch network { + case "tcp": + if supportsIPv4 { + if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { + return ln, nil + } + } + if supportsIPv6 { + return net.Listen("tcp6", "[::1]:0") + } + case "tcp4": + if supportsIPv4 { + return net.Listen("tcp4", "127.0.0.1:0") + } + case "tcp6": + if supportsIPv6 { + return net.Listen("tcp6", "[::1]:0") + } + case "unix", "unixpacket": + return net.Listen(network, localPath()) + } + return nil, fmt.Errorf("%s is not supported", network) +} + +// NewLocalPacketListener returns a packet listener which listens to a +// loopback IP address or local file system path. +// Network must be "udp", "udp4", "udp6" or "unixgram". +func NewLocalPacketListener(network string) (net.PacketConn, error) { + switch network { + case "udp": + if supportsIPv4 { + if c, err := net.ListenPacket("udp4", "127.0.0.1:0"); err == nil { + return c, nil + } + } + if supportsIPv6 { + return net.ListenPacket("udp6", "[::1]:0") + } + case "udp4": + if supportsIPv4 { + return net.ListenPacket("udp4", "127.0.0.1:0") + } + case "udp6": + if supportsIPv6 { + return net.ListenPacket("udp6", "[::1]:0") + } + case "unixgram": + return net.ListenPacket(network, localPath()) + } + return nil, fmt.Errorf("%s is not supported", network) +} + +func localPath() string { + f, err := ioutil.TempFile("", "nettest") + if err != nil { + panic(err) + } + path := f.Name() + f.Close() + os.Remove(path) + return path +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/golang.org/x/net/internal/socket/cmsghdr.go new file mode 100644 index 000000000..1eb07d26d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +func (h *cmsghdr) len() int { return int(h.Len) } +func (h *cmsghdr) lvl() int { return int(h.Level) } +func (h *cmsghdr) typ() int { return int(h.Type) } diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go new file mode 100644 index 000000000..d1d0c2de5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go new file mode 100644 index 000000000..bac66811d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go new file mode 100644 index 000000000..63f0534fa --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint64(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go new file mode 100644 index 000000000..7dedd430e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go new file mode 100644 index 000000000..a4e71226f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type cmsghdr struct{} + +const sizeofCmsghdr = 0 + +func (h *cmsghdr) len() int { return 0 } +func (h *cmsghdr) lvl() int { return 0 } +func (h *cmsghdr) typ() int { return 0 } + +func (h *cmsghdr) set(l, lvl, typ int) {} diff --git a/vendor/golang.org/x/net/internal/socket/defs_darwin.go b/vendor/golang.org/x/net/internal/socket/defs_darwin.go new file mode 100644 index 000000000..14e28c0b4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_darwin.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go new file mode 100644 index 000000000..14e28c0b4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_freebsd.go b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go new file mode 100644 index 000000000..14e28c0b4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_linux.go b/vendor/golang.org/x/net/internal/socket/defs_linux.go new file mode 100644 index 000000000..ce9ec2f6d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_linux.go @@ -0,0 +1,49 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include +#include + +#define _GNU_SOURCE +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_netbsd.go b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go new file mode 100644 index 000000000..3f8433569 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go @@ -0,0 +1,47 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_openbsd.go b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go new file mode 100644 index 000000000..14e28c0b4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_solaris.go b/vendor/golang.org/x/net/internal/socket/defs_solaris.go new file mode 100644 index 000000000..14e28c0b4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_solaris.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/golang.org/x/net/internal/socket/error_unix.go new file mode 100644 index 000000000..93dff9180 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -0,0 +1,31 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "syscall" + +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.EAGAIN: + return errEAGAIN + case syscall.EINVAL: + return errEINVAL + case syscall.ENOENT: + return errENOENT + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/error_windows.go b/vendor/golang.org/x/net/internal/socket/error_windows.go new file mode 100644 index 000000000..6a6379a8b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import "syscall" + +var ( + errERROR_IO_PENDING error = syscall.ERROR_IO_PENDING + errEINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.ERROR_IO_PENDING: + return errERROR_IO_PENDING + case syscall.EINVAL: + return errEINVAL + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go new file mode 100644 index 000000000..05d6082d1 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go new file mode 100644 index 000000000..afb34ad58 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go new file mode 100644 index 000000000..8d17a40c4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*int8)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/golang.org/x/net/internal/socket/iovec_stub.go new file mode 100644 index 000000000..c87d2a933 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type iovec struct{} + +func (v *iovec) set(b []byte) {} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go new file mode 100644 index 000000000..2e80a9cb7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !linux,!netbsd + +package socket + +import "net" + +type mmsghdr struct{} + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go new file mode 100644 index 000000000..3c42ea7ad --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -0,0 +1,42 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux netbsd + +package socket + +import "net" + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + for i := range hs { + vs := make([]iovec, len(ms[i].Buffers)) + var sa []byte + if parseFn != nil { + sa = make([]byte, sizeofSockaddrInet6) + } + if marshalFn != nil { + sa = marshalFn(ms[i].Addr) + } + hs[i].Hdr.pack(vs, ms[i].Buffers, ms[i].OOB, sa) + } + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + for i := range hs { + ms[i].N = int(hs[i].Len) + ms[i].NN = hs[i].Hdr.controllen() + ms[i].Flags = hs[i].Hdr.flags() + if parseFn != nil { + var err error + ms[i].Addr, err = parseFn(hs[i].Hdr.name(), hint) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go new file mode 100644 index 000000000..5567afc88 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -0,0 +1,39 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.Control = (*byte)(unsafe.Pointer(&oob[0])) + h.Controllen = uint32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go new file mode 100644 index 000000000..b8c87b72b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd + +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = int32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go new file mode 100644 index 000000000..5a38798cc --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.setControl(oob) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go new file mode 100644 index 000000000..a7a5987c8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint32(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go new file mode 100644 index 000000000..610fc4f3b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint64(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint64(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go new file mode 100644 index 000000000..71a69e251 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go new file mode 100644 index 000000000..6465b2073 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + if len(vs) > 0 { + h.Iov = &vs[0] + h.Iovlen = int32(len(vs)) + } + if len(oob) > 0 { + h.Accrights = (*int8)(unsafe.Pointer(&oob[0])) + h.Accrightslen = int32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) controllen() int { + return int(h.Accrightslen) +} + +func (h *msghdr) flags() int { + return int(NativeEndian.Uint32(h.Pad_cgo_2[:])) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go new file mode 100644 index 000000000..64e817335 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type msghdr struct{} + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {} +func (h *msghdr) name() []byte { return nil } +func (h *msghdr) controllen() int { return 0 } +func (h *msghdr) flags() int { return 0 } diff --git a/vendor/golang.org/x/net/internal/socket/rawconn.go b/vendor/golang.org/x/net/internal/socket/rawconn.go new file mode 100644 index 000000000..d6871d55f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn.go @@ -0,0 +1,66 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "syscall" +) + +// A Conn represents a raw connection. +type Conn struct { + network string + c syscall.RawConn +} + +// NewConn returns a new raw connection. 
+func NewConn(c net.Conn) (*Conn, error) { + var err error + var cc Conn + switch c := c.(type) { + case *net.TCPConn: + cc.network = "tcp" + cc.c, err = c.SyscallConn() + case *net.UDPConn: + cc.network = "udp" + cc.c, err = c.SyscallConn() + case *net.IPConn: + cc.network = "ip" + cc.c, err = c.SyscallConn() + default: + return nil, errors.New("unknown connection type") + } + if err != nil { + return nil, err + } + return &cc, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + var operr error + var n int + fn := func(s uintptr) { + n, operr = getsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return 0, err + } + return n, os.NewSyscallError("getsockopt", operr) +} + +func (o *Option) set(c *Conn, b []byte) error { + var operr error + fn := func(s uintptr) { + operr = setsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return err + } + return os.NewSyscallError("setsockopt", operr) +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go new file mode 100644 index 000000000..499164a3f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -0,0 +1,74 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build linux + +package socket + +import ( + "net" + "os" + "syscall" +) + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var parseFn func([]byte, string) (net.Addr, error) + if c.network != "tcp" { + parseFn = parseInetAddr + } + if err := hs.pack(ms, parseFn, nil); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("recvmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], parseFn, c.network); err != nil { + return n, err + } + return n, nil +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var marshalFn func(net.Addr) []byte + if c.network != "tcp" { + marshalFn = marshalInetAddr + } + if err := hs.pack(ms, nil, marshalFn); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("sendmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], nil, ""); err != nil { + return n, err + } + return n, nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go new file mode 100644 index 000000000..b21d2e641 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "os" + "syscall" +) + +func (c *Conn) recvMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if c.network != "tcp" { + sa = make([]byte, sizeofSockaddrInet6) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("recvmsg", operr) + } + if c.network != "tcp" { + var err error + m.Addr, err = parseInetAddr(sa[:], c.network) + if err != nil { + return err + } + } + m.N = n + m.NN = h.controllen() + m.Flags = h.flags() + return nil +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if m.Addr != nil { + sa = marshalInetAddr(m.Addr) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("sendmsg", operr) + } + m.N = n + m.NN = len(m.OOB) + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go new file mode 100644 index 000000000..f78832aa4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !linux + +package socket + +import "errors" + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go new file mode 100644 index 000000000..96733cbe1 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_stub.go b/vendor/golang.org/x/net/internal/socket/rawconn_stub.go new file mode 100644 index 000000000..d2add1a0a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.9 + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/reflect.go b/vendor/golang.org/x/net/internal/socket/reflect.go new file mode 100644 index 000000000..bb179f11d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/reflect.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "reflect" + "runtime" +) + +// A Conn represents a raw connection. +type Conn struct { + c net.Conn +} + +// NewConn returns a new raw connection. +func NewConn(c net.Conn) (*Conn, error) { + return &Conn{c: c}, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + s, err := socketOf(c.c) + if err != nil { + return 0, err + } + n, err := getsockopt(s, o.Level, o.Name, b) + return n, os.NewSyscallError("getsockopt", err) +} + +func (o *Option) set(c *Conn, b []byte) error { + s, err := socketOf(c.c) + if err != nil { + return err + } + return os.NewSyscallError("setsockopt", setsockopt(s, o.Level, o.Name, b)) +} + +func socketOf(c net.Conn) (uintptr, error) { + switch c.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + v := reflect.ValueOf(c) + switch e := v.Elem(); e.Kind() { + case reflect.Struct: + fd := e.FieldByName("conn").FieldByName("fd") + switch e := fd.Elem(); e.Kind() { + case reflect.Struct: + sysfd := e.FieldByName("sysfd") + if runtime.GOOS == "windows" { + return uintptr(sysfd.Uint()), nil + } + return uintptr(sysfd.Int()), nil + } + } + } + return 0, errors.New("invalid type") +} diff --git a/vendor/golang.org/x/net/internal/socket/socket.go b/vendor/golang.org/x/net/internal/socket/socket.go new file mode 100644 index 000000000..5f9730e6d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socket provides a portable interface for socket system +// calls. +package socket // import "golang.org/x/net/internal/socket" + +import ( + "errors" + "net" + "unsafe" +) + +// An Option represents a sticky socket option. +type Option struct { + Level int // level + Name int // name; must be equal or greater than 1 + Len int // length of value in bytes; must be equal or greater than 1 +} + +// Get reads a value for the option from the kernel. +// It returns the number of bytes written into b. +func (o *Option) Get(c *Conn, b []byte) (int, error) { + if o.Name < 1 || o.Len < 1 { + return 0, errors.New("invalid option") + } + if len(b) < o.Len { + return 0, errors.New("short buffer") + } + return o.get(c, b) +} + +// GetInt returns an integer value for the option. +// +// The Len field of Option must be either 1 or 4. 
+func (o *Option) GetInt(c *Conn) (int, error) { + if o.Len != 1 && o.Len != 4 { + return 0, errors.New("invalid option") + } + var b []byte + var bb [4]byte + if o.Len == 1 { + b = bb[:1] + } else { + b = bb[:4] + } + n, err := o.get(c, b) + if err != nil { + return 0, err + } + if n != o.Len { + return 0, errors.New("invalid option length") + } + if o.Len == 1 { + return int(b[0]), nil + } + return int(NativeEndian.Uint32(b[:4])), nil +} + +// Set writes the option and value to the kernel. +func (o *Option) Set(c *Conn, b []byte) error { + if o.Name < 1 || o.Len < 1 { + return errors.New("invalid option") + } + if len(b) < o.Len { + return errors.New("short buffer") + } + return o.set(c, b) +} + +// SetInt writes the option and value to the kernel. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) SetInt(c *Conn, v int) error { + if o.Len != 1 && o.Len != 4 { + return errors.New("invalid option") + } + var b []byte + if o.Len == 1 { + b = []byte{byte(v)} + } else { + var bb [4]byte + NativeEndian.PutUint32(bb[:o.Len], uint32(v)) + b = bb[:4] + } + return o.set(c, b) +} + +func controlHeaderLen() int { + return roundup(sizeofCmsghdr) +} + +func controlMessageLen(dataLen int) int { + return roundup(sizeofCmsghdr) + dataLen +} + +// ControlMessageSpace returns the whole length of control message. +func ControlMessageSpace(dataLen int) int { + return roundup(sizeofCmsghdr) + roundup(dataLen) +} + +// A ControlMessage represents the head message in a stream of control +// messages. +// +// A control message comprises of a header, data and a few padding +// fields to conform to the interface to the kernel. +// +// See RFC 3542 for further information. +type ControlMessage []byte + +// Data returns the data field of the control message at the head on +// m. +func (m ControlMessage) Data(dataLen int) []byte { + l := controlHeaderLen() + if len(m) < l || len(m) < l+dataLen { + return nil + } + return m[l : l+dataLen] +} + +// Next returns the control message at the next on m. +// +// Next works only for standard control messages. +func (m ControlMessage) Next(dataLen int) ControlMessage { + l := ControlMessageSpace(dataLen) + if len(m) < l { + return nil + } + return m[l:] +} + +// MarshalHeader marshals the header fields of the control message at +// the head on m. +func (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error { + if len(m) < controlHeaderLen() { + return errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(dataLen), lvl, typ) + return nil +} + +// ParseHeader parses and returns the header fields of the control +// message at the head on m. +func (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) { + l := controlHeaderLen() + if len(m) < l { + return 0, 0, 0, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + return h.lvl(), h.typ(), int(uint64(h.len()) - uint64(l)), nil +} + +// Marshal marshals the control message at the head on m, and returns +// the next control message. +func (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, error) { + l := len(data) + if len(m) < ControlMessageSpace(l) { + return nil, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(l), lvl, typ) + if l > 0 { + copy(m.Data(l), data) + } + return m.Next(l), nil +} + +// Parse parses m as a single or multiple control messages. +// +// Parse works for both standard and compatible messages. 
+func (m ControlMessage) Parse() ([]ControlMessage, error) { + var ms []ControlMessage + for len(m) >= controlHeaderLen() { + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + l := h.len() + if l <= 0 { + return nil, errors.New("invalid header length") + } + if uint64(l) < uint64(controlHeaderLen()) { + return nil, errors.New("invalid message length") + } + if uint64(l) > uint64(len(m)) { + return nil, errors.New("short buffer") + } + // On message reception: + // + // |<- ControlMessageSpace --------------->| + // |<- controlMessageLen ---------->| | + // |<- controlHeaderLen ->| | | + // +---------------+------+---------+------+ + // | Header | PadH | Data | PadD | + // +---------------+------+---------+------+ + // + // On compatible message reception: + // + // | ... |<- controlMessageLen ----------->| + // | ... |<- controlHeaderLen ->| | + // +-----+---------------+------+----------+ + // | ... | Header | PadH | Data | + // +-----+---------------+------+----------+ + ms = append(ms, ControlMessage(m[:l])) + ll := l - controlHeaderLen() + if len(m) >= ControlMessageSpace(ll) { + m = m[ControlMessageSpace(ll):] + } else { + m = m[controlMessageLen(ll):] + } + } + return ms, nil +} + +// NewControlMessage returns a new stream of control messages. +func NewControlMessage(dataLen []int) ControlMessage { + var l int + for i := range dataLen { + l += ControlMessageSpace(dataLen[i]) + } + return make([]byte, l) +} + +// A Message represents an IO message. +type Message struct { + // When writing, the Buffers field must contain at least one + // byte to write. + // When reading, the Buffers field will always contain a byte + // to read. + Buffers [][]byte + + // OOB contains protocol-specific control or miscellaneous + // ancillary data known as out-of-band data. + OOB []byte + + // Addr specifies a destination address when writing. + // It can be nil when the underlying protocol of the raw + // connection uses connection-oriented communication. + // After a successful read, it may contain the source address + // on the received packet. + Addr net.Addr + + N int // # of bytes read or written from/to Buffers + NN int // # of bytes read or written from/to OOB + Flags int // protocol-specific information on the received message +} + +// RecvMsg wraps recvmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +func (c *Conn) RecvMsg(m *Message, flags int) error { + return c.recvMsg(m, flags) +} + +// SendMsg wraps sendmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +func (c *Conn) SendMsg(m *Message, flags int) error { + return c.sendMsg(m, flags) +} + +// RecvMsgs wraps recvmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// Only Linux supports this. +func (c *Conn) RecvMsgs(ms []Message, flags int) (int, error) { + return c.recvMsgs(ms, flags) +} + +// SendMsgs wraps sendmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// Only Linux supports this. 
+func (c *Conn) SendMsgs(ms []Message, flags int) (int, error) { + return c.sendMsgs(ms, flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go b/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go new file mode 100644 index 000000000..c4edd4a8d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go @@ -0,0 +1,259 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/internal/socket" +) + +type mockControl struct { + Level int + Type int + Data []byte +} + +func TestControlMessage(t *testing.T) { + for _, tt := range []struct { + cs []mockControl + }{ + { + []mockControl{ + {Level: 1, Type: 1}, + }, + }, + { + []mockControl{ + {Level: 2, Type: 2, Data: []byte{0xfe}}, + }, + }, + { + []mockControl{ + {Level: 3, Type: 3, Data: []byte{0xfe, 0xff, 0xff, 0xfe}}, + }, + }, + { + []mockControl{ + {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}}, + }, + }, + { + []mockControl{ + {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}}, + {Level: 2, Type: 2, Data: []byte{0xfe}}, + }, + }, + } { + var w []byte + var tailPadLen int + mm := socket.NewControlMessage([]int{0}) + for i, c := range tt.cs { + m := socket.NewControlMessage([]int{len(c.Data)}) + l := len(m) - len(mm) + if i == len(tt.cs)-1 && l > len(c.Data) { + tailPadLen = l - len(c.Data) + } + w = append(w, m...) + } + + var err error + ww := make([]byte, len(w)) + copy(ww, w) + m := socket.ControlMessage(ww) + for _, c := range tt.cs { + if err = m.MarshalHeader(c.Level, c.Type, len(c.Data)); err != nil { + t.Fatalf("(%v).MarshalHeader() = %v", tt.cs, err) + } + copy(m.Data(len(c.Data)), c.Data) + m = m.Next(len(c.Data)) + } + m = socket.ControlMessage(w) + for _, c := range tt.cs { + m, err = m.Marshal(c.Level, c.Type, c.Data) + if err != nil { + t.Fatalf("(%v).Marshal() = %v", tt.cs, err) + } + } + if !bytes.Equal(ww, w) { + t.Fatalf("got %#v; want %#v", ww, w) + } + + ws := [][]byte{w} + if tailPadLen > 0 { + // Test a message with no tail padding. + nopad := w[:len(w)-tailPadLen] + ws = append(ws, [][]byte{nopad}...) 
+ }
+ for _, w := range ws {
+ ms, err := socket.ControlMessage(w).Parse()
+ if err != nil {
+ t.Fatalf("(%v).Parse() = %v", tt.cs, err)
+ }
+ for i, m := range ms {
+ lvl, typ, dataLen, err := m.ParseHeader()
+ if err != nil {
+ t.Fatalf("(%v).ParseHeader() = %v", tt.cs, err)
+ }
+ if lvl != tt.cs[i].Level || typ != tt.cs[i].Type || dataLen != len(tt.cs[i].Data) {
+ t.Fatalf("%v: got %d, %d, %d; want %d, %d, %d", tt.cs[i], lvl, typ, dataLen, tt.cs[i].Level, tt.cs[i].Type, len(tt.cs[i].Data))
+ }
+ }
+ }
+ }
+}
+
+func TestUDP(t *testing.T) {
+ c, err := nettest.NewLocalPacketListener("udp")
+ if err != nil {
+ t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err)
+ }
+ defer c.Close()
+ cc, err := socket.NewConn(c.(net.Conn))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("Message", func(t *testing.T) {
+ data := []byte("HELLO-R-U-THERE")
+ wm := socket.Message{
+ Buffers: bytes.SplitAfter(data, []byte("-")),
+ Addr: c.LocalAddr(),
+ }
+ if err := cc.SendMsg(&wm, 0); err != nil {
+ t.Fatal(err)
+ }
+ b := make([]byte, 32)
+ rm := socket.Message{
+ Buffers: [][]byte{b[:1], b[1:3], b[3:7], b[7:11], b[11:]},
+ }
+ if err := cc.RecvMsg(&rm, 0); err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(b[:rm.N], data) {
+ t.Fatalf("got %#v; want %#v", b[:rm.N], data)
+ }
+ })
+ switch runtime.GOOS {
+ case "android", "linux":
+ t.Run("Messages", func(t *testing.T) {
+ data := []byte("HELLO-R-U-THERE")
+ wmbs := bytes.SplitAfter(data, []byte("-"))
+ wms := []socket.Message{
+ {Buffers: wmbs[:1], Addr: c.LocalAddr()},
+ {Buffers: wmbs[1:], Addr: c.LocalAddr()},
+ }
+ n, err := cc.SendMsgs(wms, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != len(wms) {
+ t.Fatalf("got %d; want %d", n, len(wms))
+ }
+ b := make([]byte, 32)
+ rmbs := [][][]byte{{b[:len(wmbs[0])]}, {b[len(wmbs[0]):]}}
+ rms := []socket.Message{
+ {Buffers: rmbs[0]},
+ {Buffers: rmbs[1]},
+ }
+ n, err = cc.RecvMsgs(rms, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != len(rms) {
+ t.Fatalf("got %d; want %d", n, len(rms))
+ }
+ nn := 0
+ for i := 0; i < n; i++ {
+ nn += rms[i].N
+ }
+ if !bytes.Equal(b[:nn], data) {
+ t.Fatalf("got %#v; want %#v", b[:nn], data)
+ }
+ })
+ }
+
+ // The behavior of transmission for a zero byte payload depends
+ // on each platform implementation. Some may transmit only
+ // protocol header and options, others may transmit nothing.
+ // We test only that SendMsg and SendMsgs will not crash with
+ // empty buffers.
+ wm := socket.Message{ + Buffers: [][]byte{{}}, + Addr: c.LocalAddr(), + } + cc.SendMsg(&wm, 0) + wms := []socket.Message{ + {Buffers: [][]byte{{}}, Addr: c.LocalAddr()}, + } + cc.SendMsgs(wms, 0) +} + +func BenchmarkUDP(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + b.Fatal(err) + } + data := []byte("HELLO-R-U-THERE") + wm := socket.Message{ + Buffers: [][]byte{data}, + Addr: c.LocalAddr(), + } + rm := socket.Message{ + Buffers: [][]byte{make([]byte, 128)}, + OOB: make([]byte, 128), + } + + for M := 1; M <= 1<<9; M = M << 1 { + b.Run(fmt.Sprintf("Iter-%d", M), func(b *testing.B) { + for i := 0; i < b.N; i++ { + for j := 0; j < M; j++ { + if err := cc.SendMsg(&wm, 0); err != nil { + b.Fatal(err) + } + if err := cc.RecvMsg(&rm, 0); err != nil { + b.Fatal(err) + } + } + } + }) + switch runtime.GOOS { + case "android", "linux": + wms := make([]socket.Message, M) + for i := range wms { + wms[i].Buffers = [][]byte{data} + wms[i].Addr = c.LocalAddr() + } + rms := make([]socket.Message, M) + for i := range rms { + rms[i].Buffers = [][]byte{make([]byte, 128)} + rms[i].OOB = make([]byte, 128) + } + b.Run(fmt.Sprintf("Batch-%d", M), func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := cc.SendMsgs(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := cc.RecvMsgs(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + } + } +} diff --git a/vendor/golang.org/x/net/internal/socket/socket_test.go b/vendor/golang.org/x/net/internal/socket/socket_test.go new file mode 100644 index 000000000..bf3751b5e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket_test.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket_test + +import ( + "net" + "runtime" + "syscall" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/internal/socket" +) + +func TestSocket(t *testing.T) { + t.Run("Option", func(t *testing.T) { + testSocketOption(t, &socket.Option{Level: syscall.SOL_SOCKET, Name: syscall.SO_RCVBUF, Len: 4}) + }) +} + +func testSocketOption(t *testing.T, so *socket.Option) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + t.Fatal(err) + } + const N = 2048 + if err := so.SetInt(cc, N); err != nil { + t.Fatal(err) + } + n, err := so.GetInt(cc) + if err != nil { + t.Fatal(err) + } + if n < N { + t.Fatalf("got %d; want greater than or equal to %d", n, N) + } +} diff --git a/vendor/golang.org/x/net/internal/socket/sys.go b/vendor/golang.org/x/net/internal/socket/sys.go new file mode 100644 index 000000000..4f0eead13 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // NativeEndian is the machine native endian implementation of + // ByteOrder. 
+ NativeEndian binary.ByteOrder + + kernelAlign int +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + NativeEndian = binary.LittleEndian + } else { + NativeEndian = binary.BigEndian + } + kernelAlign = probeProtocolStack() +} + +func roundup(l int) int { + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/golang.org/x/net/internal/socket/sys_bsd.go new file mode 100644 index 000000000..f13e14ff3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd openbsd + +package socket + +import "errors" + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go new file mode 100644 index 000000000..f723fa36a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd netbsd openbsd + +package socket + +import "unsafe" + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_darwin.go b/vendor/golang.org/x/net/internal/socket/sys_darwin.go new file mode 100644 index 000000000..b17d223bf --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_darwin.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go new file mode 100644 index 000000000..b17d223bf --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux.go b/vendor/golang.org/x/net/internal/socket/sys_linux.go new file mode 100644 index 000000000..1559521e0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux,!s390x,!386 + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.go b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go new file mode 100644 index 000000000..235b2cc08 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 4 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.s b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s new file mode 100644 index 000000000..93e7d75ec --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-36 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-36 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go new file mode 100644 index 000000000..9decee2e5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x12b + sysSENDMMSG = 0x133 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go new file mode 100644 index 000000000..d753b436d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x16d + sysSENDMMSG = 0x176 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go new file mode 100644 index 000000000..b67089436 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go new file mode 100644 index 000000000..9c0d74014 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go new file mode 100644 index 000000000..071a4aba8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go new file mode 100644 index 000000000..071a4aba8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go new file mode 100644 index 000000000..9c0d74014 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go new file mode 100644 index 000000000..21c1e3f00 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go new file mode 100644 index 000000000..21c1e3f00 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go new file mode 100644 index 000000000..327979efb --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 8 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s new file mode 100644 index 000000000..06d75628c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-72 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-72 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_netbsd.go b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go new file mode 100644 index 000000000..431851c12 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" +) + +const ( + sysRECVMMSG = 0x1db + sysSENDMMSG = 0x1dc +) + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go new file mode 100644 index 000000000..dc130c27e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -0,0 +1,168 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "encoding/binary" + "errors" + "net" + "runtime" + "strconv" + "sync" + "time" +) + +func marshalInetAddr(a net.Addr) []byte { + switch a := a.(type) { + case *net.TCPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.UDPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.IPAddr: + return marshalSockaddr(a.IP, 0, a.Zone) + default: + return nil + } +} + +func marshalSockaddr(ip net.IP, port int, zone string) []byte { + if ip4 := ip.To4(); ip4 != nil { + b := make([]byte, sizeofSockaddrInet) + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) + default: + b[0] = sizeofSockaddrInet + b[1] = sysAF_INET + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[4:8], ip4) + return b + } + if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { + b := make([]byte, sizeofSockaddrInet6) + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) + default: + b[0] = sizeofSockaddrInet6 + b[1] = sysAF_INET6 + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[8:24], ip6) + if zone != "" { + NativeEndian.PutUint32(b[24:28], uint32(zoneCache.index(zone))) + } + return b + } + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + if len(b) < 2 { + return nil, errors.New("invalid address") + } + var af int + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + af = int(NativeEndian.Uint16(b[:2])) + default: + af = int(b[1]) + } + var ip net.IP + var zone string + if af == sysAF_INET { + if len(b) < sizeofSockaddrInet { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv4len) + copy(ip, b[4:8]) + } + if af == sysAF_INET6 { + if len(b) < sizeofSockaddrInet6 { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv6len) + copy(ip, b[8:24]) + if id := int(NativeEndian.Uint32(b[24:28])); id > 0 { + zone = zoneCache.name(id) + } + } + switch network { + case "tcp", "tcp4", "tcp6": + return &net.TCPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + case "udp", "udp4", "udp6": + return &net.UDPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + default: + return &net.IPAddr{IP: ip, Zone: zone}, nil + } +} + +// An ipv6ZoneCache represents a cache holding partial network +// interface information. 
It is used for reducing the cost of IPv6 +// addressing scope zone resolution. +// +// Multiple names sharing the index are managed by first-come +// first-served basis for consistency. +type ipv6ZoneCache struct { + sync.RWMutex // guard the following + lastFetched time.Time // last time routing information was fetched + toIndex map[string]int // interface name to its index + toName map[int]string // interface index to its name +} + +var zoneCache = ipv6ZoneCache{ + toIndex: make(map[string]int), + toName: make(map[int]string), +} + +func (zc *ipv6ZoneCache) update(ift []net.Interface) { + zc.Lock() + defer zc.Unlock() + now := time.Now() + if zc.lastFetched.After(now.Add(-60 * time.Second)) { + return + } + zc.lastFetched = now + if len(ift) == 0 { + var err error + if ift, err = net.Interfaces(); err != nil { + return + } + } + zc.toIndex = make(map[string]int, len(ift)) + zc.toName = make(map[int]string, len(ift)) + for _, ifi := range ift { + zc.toIndex[ifi.Name] = ifi.Index + if _, ok := zc.toName[ifi.Index]; !ok { + zc.toName[ifi.Index] = ifi.Name + } + } +} + +func (zc *ipv6ZoneCache) name(zone int) string { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + name, ok := zoneCache.toName[zone] + if !ok { + name = strconv.Itoa(zone) + } + return name +} + +func (zc *ipv6ZoneCache) index(zone string) int { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + index, ok := zoneCache.toIndex[zone] + if !ok { + index, _ = strconv.Atoi(zone) + } + return index +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris.go b/vendor/golang.org/x/net/internal/socket/sys_solaris.go new file mode 100644 index 000000000..cced74e60 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "errors" + "runtime" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" +//go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" +//go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" + +//go:linkname procGetsockopt libc___xnet_getsockopt +//go:linkname procSetsockopt libc_setsockopt +//go:linkname procRecvmsg libc___xnet_recvmsg +//go:linkname procSendmsg libc___xnet_sendmsg + +var ( + procGetsockopt uintptr + procSetsockopt uintptr + procRecvmsg uintptr + procSendmsg uintptr +) + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) +func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procRecvmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSendmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s new file mode 100644 index 000000000..a18ac5ed7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) + +TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSysvicall6(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go new file mode 100644 index 000000000..d9f06d00e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import ( + "errors" + "net" + "runtime" + "unsafe" +) + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64p32", "mips64p32": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +func marshalInetAddr(ip net.IP, port int, zone string) []byte { + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + return nil, errors.New("not implemented") +} + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + return 0, errors.New("not implemented") +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return errors.New("not implemented") +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go new file mode 100644 index 000000000..18eba3085 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!s390x,!386 netbsd openbsd + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_windows.go b/vendor/golang.org/x/net/internal/socket/sys_windows.go new file mode 100644 index 000000000..54a470ebe --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -0,0 +1,70 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "errors" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x17 + + sysSOCK_RAW = 0x3 +) + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), (*int32)(unsafe.Pointer(&l))) + return int(l), err +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b))) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go new file mode 100644 index 000000000..26f8feff3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go new file mode 100644 index 000000000..e2987f7db --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* 
in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go new file mode 100644 index 000000000..26f8feff3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go new file mode 100644 index 000000000..e2987f7db --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go new file mode 100644 index 000000000..c582abd57 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + 
sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go new file mode 100644 index 000000000..04a24886c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go new file mode 100644 index 000000000..35c7cb9c9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go new file mode 100644 index 000000000..04a24886c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git 
a/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go new file mode 100644 index 000000000..430206930 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go new file mode 100644 index 000000000..430206930 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + 
sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go new file mode 100644 index 000000000..430206930 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + 
sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go new file mode 100644 index 000000000..430206930 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port 
uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go new file mode 100644 index 000000000..1502f6c55 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go new file mode 100644 index 000000000..db60491fe --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go @@ -0,0 +1,65 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + 
Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go new file mode 100644 index 000000000..2a1a79985 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go @@ -0,0 +1,68 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go new file mode 100644 index 000000000..db60491fe --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -0,0 +1,65 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go new file mode 100644 index 000000000..1c836361e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type 
sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go new file mode 100644 index 000000000..a6c0bf464 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go new file mode 100644 index 000000000..1c836361e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go new file mode 100644 index 000000000..327c63290 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go @@ -0,0 +1,60 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1a + + sysSOCK_RAW = 0x4 +) + +type iovec struct { + Base *int8 + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Accrights *int8 + Accrightslen int32 + Pad_cgo_2 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero 
[8]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x20 +) diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 000000000..685f0e7ea --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. 
+type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. 
+ ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. 
+func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. 
+ overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go new file mode 100644 index 000000000..66325a912 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
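To make the level/bucket behaviour of the vendored timeseries package concrete, here is a minimal usage sketch, mirroring what the test file below exercises. This is only a sketch: the package is internal, so the import resolves only from within golang.org/x/net, and the values and durations are illustrative.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/internal/timeseries" // internal: importable only from inside x/net
)

func main() {
	// Track counts at resolutions from 1 second up to 16 weeks.
	ts := timeseries.NewTimeSeries(timeseries.NewFloat)

	// Record three observations of value 1 at the current time.
	for i := 0; i < 3; i++ {
		v := timeseries.Float(1)
		ts.Add(&v)
	}

	// Aggregate over the last minute and over the whole series.
	fmt.Println("last minute:", ts.Recent(time.Minute)) // 3
	fmt.Println("total:", ts.Total())                   // 3
}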
+ +package timeseries + +import ( + "math" + "testing" + "time" +) + +func isNear(x *Float, y float64, tolerance float64) bool { + return math.Abs(x.Value()-y) < tolerance +} + +func isApproximate(x *Float, y float64) bool { + return isNear(x, y, 1e-2) +} + +func checkApproximate(t *testing.T, o Observable, y float64) { + x := o.(*Float) + if !isApproximate(x, y) { + t.Errorf("Wanted %g, got %g", y, x.Value()) + } +} + +func checkNear(t *testing.T, o Observable, y, tolerance float64) { + x := o.(*Float) + if !isNear(x, y, tolerance) { + t.Errorf("Wanted %g +- %g, got %g", y, tolerance, x.Value()) + } +} + +var baseTime = time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC) + +func tu(s int64) time.Time { + return baseTime.Add(time.Duration(s) * time.Second) +} + +func tu2(s int64, ns int64) time.Time { + return baseTime.Add(time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond) +} + +func TestBasicTimeSeries(t *testing.T) { + ts := NewTimeSeries(NewFloat) + fo := new(Float) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(1)), 40) + checkApproximate(t, ts.Total(), 40) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 40) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 70) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 60) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 90) + *fo = Float(100) + ts.AddWithTime(fo, tu(100)) + checkApproximate(t, ts.Range(tu(99), tu(100)), 100) + checkApproximate(t, ts.Range(tu(0), tu(4)), 36) + checkApproximate(t, ts.Total(), 190) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(4)), 44) + checkApproximate(t, ts.Range(tu(37), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(50), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(99), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Total(), 210) + + for i, l := range ts.ComputeRange(tu(36), tu(100), 64) { + if i == 63 { + checkApproximate(t, l, 100) + } else { + checkApproximate(t, l, 0) + } + } + + checkApproximate(t, ts.Range(tu(0), tu(100)), 210) + checkApproximate(t, ts.Range(tu(10), tu(100)), 100) + + for i, l := range ts.ComputeRange(tu(0), tu(100), 100) { + if i < 10 { + checkApproximate(t, l, 11) + } else if i >= 90 { + checkApproximate(t, l, 10) + } else { + checkApproximate(t, l, 0) + } + } +} + +func TestFloat(t *testing.T) { + f := Float(1) + if g, w := f.String(), "1"; g != w { + t.Errorf("Float(1).String = %q; want %q", g, w) + } + f2 := Float(2) + var o Observable = &f2 + f.Add(o) + if g, w := f.Value(), 3.0; g != w { + t.Errorf("Float post-add = %v; want %v", g, w) + } + f.Multiply(2) + if g, w := f.Value(), 6.0; g != w { + t.Errorf("Float post-multiply = %v; want %v", g, w) + } + f.Clear() + if g, w := f.Value(), 0.0; g != w { + t.Errorf("Float post-clear = %v; want %v", g, w) + } + f.CopyFrom(&f2) + if g, w := f.Value(), 2.0; g != w { + t.Errorf("Float post-CopyFrom = %v; want %v", g, w) + } +} + +type mockClock struct { + time time.Time +} + +func (m *mockClock) Time() time.Time { return m.time } +func (m *mockClock) Set(t time.Time) { m.time = t } + +const buckets = 6 + +var testResolutions = []time.Duration{ + 10 * time.Second, // level holds one minute of observations + 100 * 
time.Second, // level holds ten minutes of observations + 10 * time.Minute, // level holds one hour of observations +} + +// TestTimeSeries uses a small number of buckets to force a higher +// error rate on approximations from the timeseries. +type TestTimeSeries struct { + timeSeries +} + +func TestExpectedErrorRate(t *testing.T) { + ts := new(TestTimeSeries) + fake := new(mockClock) + fake.Set(time.Now()) + ts.timeSeries.init(testResolutions, NewFloat, buckets, fake) + for i := 1; i <= 61*61; i++ { + fake.Set(fake.Time().Add(1 * time.Second)) + ob := Float(1) + ts.AddWithTime(&ob, fake.Time()) + + // The results should be accurate within one missing bucket (1/6) of the observations recorded. + checkNear(t, ts.Latest(0, buckets), min(float64(i), 60), 10) + checkNear(t, ts.Latest(1, buckets), min(float64(i), 600), 100) + checkNear(t, ts.Latest(2, buckets), min(float64(i), 3600), 600) + } +} + +func min(a, b float64) float64 { + if a < b { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/ipv4/batch.go b/vendor/golang.org/x/net/ipv4/batch.go new file mode 100644 index 000000000..b44549928 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/batch.go @@ -0,0 +1,191 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv4 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// RawConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +// +// Unlike the ReadFrom method, it doesn't strip the IPv4 header +// followed by option headers from the received IPv4 datagram when the +// underlying transport is net.IPConn. Each Buffers field of Message +// must be large enough to accommodate an IPv4 header and option +// headers. 
+func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. 
+func (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv4/bpf_test.go b/vendor/golang.org/x/net/ipv4/bpf_test.go new file mode 100644 index 000000000..b44da9054 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/bpf_test.go @@ -0,0 +1,93 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +func TestBPF(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("not supported on %s", runtime.GOOS) + } + + l, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + p := ipv4.NewPacketConn(l) + + // This filter accepts UDP packets whose first payload byte is + // even. + prog, err := bpf.Assemble([]bpf.Instruction{ + // Load the first byte of the payload (skipping UDP header). + bpf.LoadAbsolute{Off: 8, Size: 1}, + // Select LSB of the byte. + bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, + // Byte is even? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, + // Accept. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("compiling BPF: %s", err) + } + + if err = p.SetBPF(prog); err != nil { + t.Fatalf("attaching filter to Conn: %s", err) + } + + s, err := net.Dial("udp4", l.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer s.Close() + go func() { + for i := byte(0); i < 10; i++ { + s.Write([]byte{i}) + } + }() + + l.SetDeadline(time.Now().Add(2 * time.Second)) + seen := make([]bool, 5) + for { + var b [512]byte + n, _, err := l.ReadFrom(b[:]) + if err != nil { + t.Fatalf("reading from listener: %s", err) + } + if n != 1 { + t.Fatalf("unexpected packet length, want 1, got %d", n) + } + if b[0] >= 10 { + t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) + } + if b[0]%2 != 0 { + t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) + } + seen[b[0]/2] = true + + seenAll := true + for _, v := range seen { + if !v { + seenAll = false + break + } + } + if seenAll { + break + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/control.go b/vendor/golang.org/x/net/ipv4/control.go new file mode 100644 index 000000000..a2b02ca95 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +type ControlFlags uint + +const ( + FlagTTL ControlFlags = 1 << iota // pass the TTL on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet +) + +// A ControlMessage represents per packet basis IP-level socket options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn or RawConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn or RawConn allows to send the options + // to the protocol stack. + // + TTL int // time-to-live, receiving only + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex) +} + +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var m socket.ControlMessage + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { + m = socket.NewControlMessage([]int{ctlOpts[ctlPacketInfo].length}) + } + if len(m) > 0 { + ctlOpts[ctlPacketInfo].marshal(m, cm) + } + return m +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIP { + continue + } + switch { + case typ == ctlOpts[ctlTTL].name && l >= ctlOpts[ctlTTL].length: + ctlOpts[ctlTTL].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlDst].name && l >= ctlOpts[ctlDst].length: + ctlOpts[ctlDst].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlInterface].name && l >= ctlOpts[ctlInterface].length: + ctlOpts[ctlInterface].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. 
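A short, hedged illustration of how the ControlMessage fields described above flow through a PacketConn; c is an existing net.PacketConn, the flag set is illustrative, and the usual imports are assumed:

    p := ipv4.NewPacketConn(c)
    // Ask the stack to deliver TTL, destination address and interface
    // index alongside each received packet.
    if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil {
        // error handling
    }
    b := make([]byte, 1500)
    n, cm, src, err := p.ReadFrom(b)
    if err != nil {
        // error handling
    }
    // cm.TTL, cm.Dst and cm.IfIndex now describe the received packet
    // (cm may be nil on platforms without control-message support).

    // On the write side only Src and IfIndex are consulted; passing a
    // nil *ControlMessage is also accepted.
    wcm := &ipv4.ControlMessage{IfIndex: cm.IfIndex}
    if _, err := p.WriteTo(b[:n], wcm, src); err != nil {
        // error handling
    }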
+func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTTL].length) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlDst].length) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlInterface].length) + } + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTTL = iota // header field + ctlSrc // header field + ctlDst // header field + ctlInterface // inbound or outbound interface + ctlPacketInfo // inbound or outbound packet path + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go new file mode 100644 index 000000000..77e7ad5be --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalDst(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVDSTADDR, net.IPv4len) + return m.Next(net.IPv4len) +} + +func parseDst(cm *ControlMessage, b []byte) { + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, b[:net.IPv4len]) +} + +func marshalInterface(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVIF, syscall.SizeofSockaddrDatalink) + return m.Next(syscall.SizeofSockaddrDatalink) +} + +func parseInterface(cm *ControlMessage, b []byte) { + sadl := (*syscall.SockaddrDatalink)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(sadl.Index) +} diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go new file mode 100644 index 000000000..425338f35 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_PKTINFO, sizeofInetPktinfo) + if cm != nil { + pi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0])) + if ip := cm.Src.To4(); ip != nil { + copy(pi.Spec_dst[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInetPktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inetPktinfo)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(pi.Ifindex) + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, pi.Addr[:]) +} diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go new file mode 100644 index 000000000..5a2f7d8d3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/control_test.go b/vendor/golang.org/x/net/ipv4/control_test.go new file mode 100644 index 000000000..f87fe124b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_test.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "testing" + + "golang.org/x/net/ipv4" +) + +func TestControlMessageParseWithFuzz(t *testing.T) { + var cm ipv4.ControlMessage + for _, fuzz := range []string{ + "\f\x00\x00\x00\x00\x00\x00\x00\x14\x00\x00\x00", + "\f\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00", + } { + cm.Parse([]byte(fuzz)) + } +} diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go new file mode 100644 index 000000000..e1ae8167b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTTL]; ok && cf&FlagTTL != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTTL) + } else { + opt.clear(FlagTTL) + } + } + if so, ok := sockOpts[ssoPacketInfo]; ok { + if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & (FlagSrc | FlagDst | FlagInterface)) + } else { + opt.clear(cf & (FlagSrc | FlagDst | FlagInterface)) + } + } + } else { + if so, ok := sockOpts[ssoReceiveDst]; ok && cf&FlagDst != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagDst) + } else { + opt.clear(FlagDst) + } + } + if so, ok := sockOpts[ssoReceiveInterface]; ok && cf&FlagInterface != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagInterface) + } else { + opt.clear(FlagInterface) + } + } + } + return nil +} + +func marshalTTL(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVTTL, 1) + return m.Next(1) +} + +func parseTTL(cm *ControlMessage, b []byte) { + cm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0]))) +} diff --git a/vendor/golang.org/x/net/ipv4/control_windows.go b/vendor/golang.org/x/net/ipv4/control_windows.go new file mode 100644 index 000000000..ce55c6644 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_windows.go @@ -0,0 +1,16 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "syscall" + + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} diff --git a/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/golang.org/x/net/ipv4/defs_darwin.go new file mode 100644 index 000000000..c8f2e05b8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_darwin.go @@ -0,0 +1,77 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_STRIPHDR = C.IP_STRIPHDR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go new file mode 100644 index 000000000..f30544ea2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/golang.org/x/net/ipv4/defs_freebsd.go new file mode 100644 index 000000000..4dd57d865 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_freebsd.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_SENDSRCADDR = C.IP_SENDSRCADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_ONESBCAST = C.IP_ONESBCAST + sysIP_BINDANY = C.IP_BINDANY + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_MINTTL = C.IP_MINTTL + sysIP_DONTFRAG = C.IP_DONTFRAG + sysIP_RECVTOS = C.IP_RECVTOS + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/golang.org/x/net/ipv4/defs_linux.go new file mode 100644 index 000000000..beb11071a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_linux.go @@ -0,0 +1,122 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_PKTOPTIONS = C.IP_PKTOPTIONS + sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER + sysIP_RECVERR = C.IP_RECVERR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_RECVTOS = C.IP_RECVTOS + sysIP_MTU = C.IP_MTU + sysIP_FREEBIND = C.IP_FREEBIND + sysIP_TRANSPARENT = C.IP_TRANSPARENT + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR + sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR + sysIP_MINTTL = C.IP_MINTTL + sysIP_NODEFRAG = C.IP_NODEFRAG + sysIP_UNICAST_IF = C.IP_UNICAST_IF + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_MSFILTER = C.IP_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL + + //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT + //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT + //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO + //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE + //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE + //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT + + sysICMP_FILTER = C.ICMP_FILTER + + sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE + sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL + sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP + sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6 + sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS + sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING + + sysSOL_SOCKET = C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER + + sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + sizeofSockExtendedErr = C.sizeof_struct_sock_extended_err + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPFilter = C.sizeof_struct_icmp_filter + + sizeofSockFprog = C.sizeof_struct_sock_fprog +) + +type kernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type sockExtendedErr C.struct_sock_extended_err + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type 
icmpFilter C.struct_icmp_filter + +type sockFProg C.struct_sock_fprog + +type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/golang.org/x/net/ipv4/defs_netbsd.go new file mode 100644 index 000000000..8f8af1b89 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_netbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/golang.org/x/net/ipv4/defs_openbsd.go new file mode 100644 index 000000000..8f8af1b89 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_openbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/golang.org/x/net/ipv4/defs_solaris.go new file mode 100644 index 000000000..aeb33e9c8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_solaris.go @@ -0,0 +1,84 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include + +#include +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVSLLA = C.IP_RECVSLLA + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_NEXTHOP = C.IP_NEXTHOP + + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + sysIP_DONTFRAG = C.IP_DONTFRAG + + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC + sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL + sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF + + sysIP_REUSEADDR = C.IP_REUSEADDR + sysIP_DONTROUTE = C.IP_DONTROUTE + sysIP_BROADCAST = C.IP_BROADCAST + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type ipMreq C.struct_ip_mreq + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/dgramopt.go b/vendor/golang.org/x/net/ipv4/dgramopt.go new file mode 100644 index 000000000..54d77d5fe --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/bpf" +) + +// MulticastTTL returns the time-to-live field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastTTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetMulticastTTL sets the time-to-live field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, ttl) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. 
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errOpNoSupport + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errOpNoSupport + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errOpNoSupport + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. 
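As a quick companion to the group-management methods above, a hedged sketch of joining an any-source group and a source-specific group; the interface name, group and source addresses are placeholders, c is an existing "udp4" net.PacketConn, and fuller examples appear in the package documentation later in this diff:

    en0, err := net.InterfaceByName("en0")
    if err != nil {
        // error handling
    }
    p := ipv4.NewPacketConn(c)

    // Any-source multicast: accept the group from every sender.
    group := &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}
    if err := p.JoinGroup(en0, group); err != nil {
        // error handling
    }
    defer p.LeaveGroup(en0, group)

    // Source-specific multicast on IGMPv3-capable platforms: accept the
    // group only from one nominated source.
    ssmGroup := &net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)}
    src := &net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)}
    if err := p.JoinSourceSpecificGroup(en0, ssmGroup, src); err != nil {
        // error handling
    }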
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ICMPFilter returns an ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go new file mode 100644 index 000000000..b43935a5a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -0,0 +1,244 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv4 implements IP-level socket options for the Internet +// Protocol version 4. +// +// The package provides IP-level socket options that allow +// manipulation of IPv4 facilities. +// +// The IPv4 protocol and basic host requirements for IPv4 are defined +// in RFC 791 and RFC 1122. +// Host extensions for multicasting and socket interface extensions +// for multicast source filters are defined in RFC 1112 and RFC 3678. +// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC +// 3376. +// Source-specific multicast is defined in RFC 4607. 
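Supplementing the ICMPFilter and SetBPF accessors defined in dgramopt.go above, a hedged sketch of Linux-only ICMP type filtering; the ICMPFilter methods live in this package's icmp.go, which is outside this hunk, and opening a raw ICMP socket typically needs elevated privileges:

    c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
    if err != nil {
        // error handling
    }
    defer c.Close()
    p := ipv4.NewPacketConn(c)

    var f ipv4.ICMPFilter
    f.SetAll(true)                   // start by blocking every ICMP type ...
    f.Accept(ipv4.ICMPTypeEchoReply) // ... then allow echo replies through
    if err := p.SetICMPFilter(&f); err != nil {
        // error handling (not supported outside Linux)
    }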
+// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv4 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the type-of-service field on the +// IPv4 header for each packet. +// +// ln, err := net.Listen("tcp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv4.NewConn(c).SetTOS(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPconn which are created as network connections that use the +// IPv4 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.IPv4(224, 0, 0, 250) +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv4 and Ethernet. +// +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, cm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if cm.Dst.IsMulticast() { +// if cm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. 
+// +// p.SetTOS(0x0) +// p.SetTTL(16) +// if _, err := p.WriteTo(data, nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// for _, ifi := range []*net.Interface{en0, en1} { +// if err := p.SetMulticastInterface(ifi); err != nil { +// // error handling +// } +// p.SetMulticastTTL(2) +// if _, err := p.WriteTo(data, nil, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn or RawConn may join multiple +// multicast groups. For example, a UDP listener with port 1024 might +// join two different groups across over two different network +// interfaces by using: +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv4.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// p2 := ipv4.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn or RawConn on IGMPv3 supported +// platform is able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)} +// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)}) +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. 
+// +// exclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on IGMPv3 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// IGMPv1 or IGMPv2 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv4 // import "golang.org/x/net/ipv4" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go new file mode 100644 index 000000000..2ab877363 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/endpoint.go @@ -0,0 +1,187 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn and RawConn are +// not implemented. + +// A Conn represents a network endpoint that uses the IPv4 transport. +// It is used to control basic IP-level socket options such as TOS and +// TTL. +type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses the +// IPv4 transport. It is used to control several IP-level socket +// options including multicasting. It also provides datagram based +// network I/O methods specific to the IPv4 and higher layer protocols +// such as UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage sets the per packet IP-level socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. 
+func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + p := &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } + return p +} + +// A RawConn represents a packet network endpoint that uses the IPv4 +// transport. It is used to control several IP-level socket options +// including IPv4 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv4 and higher layer +// protocols that handle IPv4 datagram directly such as OSPF, GRE. +type RawConn struct { + genericOpt + dgramOpt + packetHandler +} + +// SetControlMessage sets the per packet IP-level socket options. +func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *RawConn) SetDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *RawConn) SetReadDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *RawConn) SetWriteDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *RawConn) Close() error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.Close() +} + +// NewRawConn returns a new RawConn using c as its underlying +// transport. +func NewRawConn(c net.PacketConn) (*RawConn, error) { + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + return nil, err + } + r := &RawConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + packetHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc}, + } + so, ok := sockOpts[ssoHeaderPrepend] + if !ok { + return nil, errOpNoSupport + } + if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { + return nil, err + } + return r, nil +} diff --git a/vendor/golang.org/x/net/ipv4/example_test.go b/vendor/golang.org/x/net/ipv4/example_test.go new file mode 100644 index 000000000..ddc7577e8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/example_test.go @@ -0,0 +1,224 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "fmt" + "log" + "net" + "os" + "runtime" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" +) + +func ExampleConn_markingTCP() { + ln, err := net.Listen("tcp", "0.0.0.0:1024") + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + for { + c, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + if c.RemoteAddr().(*net.TCPAddr).IP.To4() != nil { + p := ipv4.NewConn(c) + if err := p.SetTOS(0x28); err != nil { // DSCP AF11 + log.Fatal(err) + } + if err := p.SetTTL(128); err != nil { + log.Fatal(err) + } + } + if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { + log.Fatal(err) + } + }(c) + } +} + +func ExamplePacketConn_servingOneShotMulticastDNS() { + c, err := net.ListenPacket("udp4", "0.0.0.0:5353") // mDNS over UDP + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + mDNSLinkLocal := net.UDPAddr{IP: net.IPv4(224, 0, 0, 251)} + if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &mDNSLinkLocal) + if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { + log.Fatal(err) + } + + b := make([]byte, 1500) + for { + _, cm, peer, err := p.ReadFrom(b) + if err != nil { + log.Fatal(err) + } + if !cm.Dst.IsMulticast() || !cm.Dst.Equal(mDNSLinkLocal.IP) { + continue + } + answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this + if _, err := p.WriteTo(answers, nil, peer); err != nil { + log.Fatal(err) + } + } +} + +func ExamplePacketConn_tracingIPPacketRoute() { + // Tracing an IP packet route to www.google.com. + + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + log.Fatal(err) + } + var dst net.IPAddr + for _, ip := range ips { + if ip.To4() != nil { + dst.IP = ip + fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) + break + } + } + if dst.IP == nil { + log.Fatal("no A record found") + } + + c, err := net.ListenPacket("ip4:1", "0.0.0.0") // ICMP for IPv4 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + + if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagSrc|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil { + log.Fatal(err) + } + wm := icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + } + + rb := make([]byte, 1500) + for i := 1; i <= 64; i++ { // up to 64 hops + wm.Body.(*icmp.Echo).Seq = i + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + if err := p.SetTTL(i); err != nil { + log.Fatal(err) + } + + // In the real world usually there are several + // multiple traffic-engineered paths for each hop. + // You may need to probe a few times to each hop. 
+ begin := time.Now() + if _, err := p.WriteTo(wb, nil, &dst); err != nil { + log.Fatal(err) + } + if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + log.Fatal(err) + } + n, cm, peer, err := p.ReadFrom(rb) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + fmt.Printf("%v\t*\n", i) + continue + } + log.Fatal(err) + } + rm, err := icmp.ParseMessage(1, rb[:n]) + if err != nil { + log.Fatal(err) + } + rtt := time.Since(begin) + + // In the real world you need to determine whether the + // received message is yours using ControlMessage.Src, + // ControlMessage.Dst, icmp.Echo.ID and icmp.Echo.Seq. + switch rm.Type { + case ipv4.ICMPTypeTimeExceeded: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) + case ipv4.ICMPTypeEchoReply: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) + return + default: + log.Printf("unknown ICMP message: %+v\n", rm) + } + } +} + +func ExampleRawConn_advertisingOSPFHello() { + c, err := net.ListenPacket("ip4:89", "0.0.0.0") // OSPF for IPv4 + if err != nil { + log.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + log.Fatal(err) + } + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + allSPFRouters := net.IPAddr{IP: net.IPv4(224, 0, 0, 5)} + if err := r.JoinGroup(en0, &allSPFRouters); err != nil { + log.Fatal(err) + } + defer r.LeaveGroup(en0, &allSPFRouters) + + hello := make([]byte, 24) // fake hello data, you need to implement this + ospf := make([]byte, 24) // fake ospf header, you need to implement this + ospf[0] = 2 // version 2 + ospf[1] = 1 // hello packet + ospf = append(ospf, hello...) + iph := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 0xc0, // DSCP CS6 + TotalLen: ipv4.HeaderLen + len(ospf), + TTL: 1, + Protocol: 89, + Dst: allSPFRouters.IP.To4(), + } + + var cm *ipv4.ControlMessage + switch runtime.GOOS { + case "darwin", "linux": + cm = &ipv4.ControlMessage{IfIndex: en0.Index} + default: + if err := r.SetMulticastInterface(en0); err != nil { + log.Fatal(err) + } + } + if err := r.WriteTo(iph, ospf, cm); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go new file mode 100644 index 000000000..9d490fac9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/gen.go @@ -0,0 +1,199 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. 
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", + parseICMPv4Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "package ipv4\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv4Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv4Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigDescr) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv4Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv4ParamRecord struct { + OrigDescr string + Descr string + Value int +} + +func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + 
for i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Descr, "Reserved") || + strings.Contains(pr.Descr, "Unassigned") || + strings.Contains(pr.Descr, "Deprecated") || + strings.Contains(pr.Descr, "Experiment") || + strings.Contains(pr.Descr, "experiment") { + continue + } + ss := strings.Split(pr.Descr, "\n") + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + s := strings.TrimSpace(prs[i].Descr) + prs[i].OrigDescr = s + prs[i].Descr = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/ipv4/genericopt.go b/vendor/golang.org/x/net/ipv4/genericopt.go new file mode 100644 index 000000000..119bf841b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/genericopt.go @@ -0,0 +1,57 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "syscall" + +// TOS returns the type-of-service field value for outgoing packets. +func (c *genericOpt) TOS() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTOS] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTOS sets the type-of-service field value for future outgoing +// packets. +func (c *genericOpt) SetTOS(tos int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTOS] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, tos) +} + +// TTL returns the time-to-live field value for outgoing packets. +func (c *genericOpt) TTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTTL] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTTL sets the time-to-live field value for future outgoing +// packets. +func (c *genericOpt) SetTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTTL] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, ttl) +} diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go new file mode 100644 index 000000000..8bb0f0f4d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -0,0 +1,159 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "encoding/binary" + "fmt" + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +const ( + Version = 4 // protocol version + HeaderLen = 20 // header length without extension headers + maxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields +) + +type HeaderFlags int + +const ( + MoreFragments HeaderFlags = 1 << iota // more fragments flag + DontFragment // don't fragment flag +) + +// A Header represents an IPv4 header. 
+type Header struct { + Version int // protocol version + Len int // header length + TOS int // type-of-service + TotalLen int // packet total length + ID int // identification + Flags HeaderFlags // flags + FragOff int // fragment offset + TTL int // time-to-live + Protocol int // next protocol + Checksum int // checksum + Src net.IP // source address + Dst net.IP // destination address + Options []byte // options, extension headers +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst) +} + +// Marshal returns the binary encoding of h. +func (h *Header) Marshal() ([]byte, error) { + if h == nil { + return nil, syscall.EINVAL + } + if h.Len < HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := HeaderLen + len(h.Options) + b := make([]byte, hdrlen) + b[0] = byte(Version<<4 | (hdrlen >> 2 & 0x0f)) + b[1] = byte(h.TOS) + flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + case "freebsd": + if freebsdVersion < 1100000 { + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } else { + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + default: + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + binary.BigEndian.PutUint16(b[4:6], uint16(h.ID)) + b[8] = byte(h.TTL) + b[9] = byte(h.Protocol) + binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum)) + if ip := h.Src.To4(); ip != nil { + copy(b[12:16], ip[:net.IPv4len]) + } + if ip := h.Dst.To4(); ip != nil { + copy(b[16:20], ip[:net.IPv4len]) + } else { + return nil, errMissingAddress + } + if len(h.Options) > 0 { + copy(b[HeaderLen:], h.Options) + } + return b, nil +} + +// Parse parses b as an IPv4 header and stores the result in h.
+func (h *Header) Parse(b []byte) error { + if h == nil || len(b) < HeaderLen { + return errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return errBufferTooShort + } + h.Version = int(b[0] >> 4) + h.Len = hdrlen + h.TOS = int(b[1]) + h.ID = int(binary.BigEndian.Uint16(b[4:6])) + h.TTL = int(b[8]) + h.Protocol = int(b[9]) + h.Checksum = int(binary.BigEndian.Uint16(b[10:12])) + h.Src = net.IPv4(b[12], b[13], b[14], b[15]) + h.Dst = net.IPv4(b[16], b[17], b[18], b[19]) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + case "freebsd": + if freebsdVersion < 1100000 { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + if freebsdVersion < 1000000 { + h.TotalLen += hdrlen + } + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + } else { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + optlen := hdrlen - HeaderLen + if optlen > 0 && len(b) >= hdrlen { + if cap(h.Options) < optlen { + h.Options = make([]byte, optlen) + } else { + h.Options = h.Options[:optlen] + } + copy(h.Options, b[HeaderLen:hdrlen]) + } + return nil +} + +// ParseHeader parses b as an IPv4 header. +func ParseHeader(b []byte) (*Header, error) { + h := new(Header) + if err := h.Parse(b); err != nil { + return nil, err + } + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv4/header_test.go b/vendor/golang.org/x/net/ipv4/header_test.go new file mode 100644 index 000000000..a246aeea1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header_test.go @@ -0,0 +1,228 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "bytes" + "encoding/binary" + "net" + "reflect" + "runtime" + "strings" + "testing" + + "golang.org/x/net/internal/socket" +) + +type headerTest struct { + wireHeaderFromKernel []byte + wireHeaderToKernel []byte + wireHeaderFromTradBSDKernel []byte + wireHeaderToTradBSDKernel []byte + wireHeaderFromFreeBSD10Kernel []byte + wireHeaderToFreeBSD10Kernel []byte + *Header +} + +var headerLittleEndianTests = []headerTest{ + // TODO(mikio): Add platform dependent wire header formats when + // we support new platforms. 
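(Editor's sketch, not part of the vendored patch: the Header API defined in header.go above is typically used as below; every field value is illustrative.)

package main

import (
	"fmt"
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	// Build a header by hand; only the fields Marshal requires are set,
	// and the addresses are documentation-range examples.
	h := &ipv4.Header{
		Version:  ipv4.Version,
		Len:      ipv4.HeaderLen,
		TotalLen: ipv4.HeaderLen + 16, // header plus a 16-byte payload
		TTL:      64,
		Protocol: 17, // UDP
		Src:      net.IPv4(192, 0, 2, 1),
		Dst:      net.IPv4(192, 0, 2, 254),
	}
	wire, err := h.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	// Turn the encoded bytes back into a Header.
	parsed, err := ipv4.ParseHeader(wire)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed)
}

As the switch statements in Marshal and Parse above show, the byte order of TotalLen and FragOff follows each platform's raw-socket ABI, which is why the test table later in this diff carries several wire formats for the same header.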
+ { + wireHeaderFromKernel: []byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToKernel: []byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromTradBSDKernel: []byte{ + 0x45, 0x01, 0xdb, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToTradBSDKernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromFreeBSD10Kernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToFreeBSD10Kernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + Header: &Header{ + Version: Version, + Len: HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + }, + }, + + // with option headers + { + wireHeaderFromKernel: []byte{ + 0x46, 0x01, 0xbe, 0xf3, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToKernel: []byte{ + 0x46, 0x01, 0xbe, 0xf3, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderFromTradBSDKernel: []byte{ + 0x46, 0x01, 0xdb, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToTradBSDKernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderFromFreeBSD10Kernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToFreeBSD10Kernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + Header: &Header{ + Version: Version, + Len: HeaderLen + 4, + TOS: 1, + TotalLen: 0xbef3, + ID: 0xcafe, + Flags: DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + Options: []byte{0xff, 0xfe, 0xfe, 0xff}, + }, + }, +} + +func TestMarshalHeader(t *testing.T) { + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for non-little endian machine yet") + } + + for _, tt := range headerLittleEndianTests { + b, err := tt.Header.Marshal() + if err != nil { + t.Fatal(err) + } + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = tt.wireHeaderToTradBSDKernel + case "freebsd": + switch { + case freebsdVersion < 1000000: + wh = tt.wireHeaderToTradBSDKernel + case 1000000 <= freebsdVersion && freebsdVersion < 1100000: + wh = tt.wireHeaderToFreeBSD10Kernel + default: + wh = tt.wireHeaderToKernel + } + default: + wh = tt.wireHeaderToKernel + } + if !bytes.Equal(b, wh) { + t.Fatalf("got %#v; want %#v", b, wh) + } + } +} + +func TestParseHeader(t *testing.T) { + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for big endian machine 
yet") + } + + for _, tt := range headerLittleEndianTests { + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = tt.wireHeaderFromTradBSDKernel + case "freebsd": + switch { + case freebsdVersion < 1000000: + wh = tt.wireHeaderFromTradBSDKernel + case 1000000 <= freebsdVersion && freebsdVersion < 1100000: + wh = tt.wireHeaderFromFreeBSD10Kernel + default: + wh = tt.wireHeaderFromKernel + } + default: + wh = tt.wireHeaderFromKernel + } + h, err := ParseHeader(wh) + if err != nil { + t.Fatal(err) + } + if err := h.Parse(wh); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, tt.Header) { + t.Fatalf("got %#v; want %#v", h, tt.Header) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go new file mode 100644 index 000000000..a5052e324 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -0,0 +1,63 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "errors" + "net" +) + +var ( + errMissingAddress = errors.New("missing address") + errMissingHeader = errors.New("missing header") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") + errNoSuchMulticastInterface = errors.New("no such multicast interface") + + // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. + freebsdVersion uint32 +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP4(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go new file mode 100644 index 000000000..be10c9488 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/iana.go @@ -0,0 +1,34 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package ipv4 + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +const ( + ICMPTypeEchoReply ICMPType = 0 // Echo Reply + ICMPTypeDestinationUnreachable ICMPType = 3 // Destination Unreachable + ICMPTypeRedirect ICMPType = 5 // Redirect + ICMPTypeEcho ICMPType = 8 // Echo + ICMPTypeRouterAdvertisement ICMPType = 9 // Router Advertisement + ICMPTypeRouterSolicitation ICMPType = 10 // Router Solicitation + ICMPTypeTimeExceeded ICMPType = 11 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 12 // Parameter Problem + ICMPTypeTimestamp ICMPType = 13 // Timestamp + ICMPTypeTimestampReply ICMPType = 14 // Timestamp Reply + ICMPTypePhoturis ICMPType = 40 // Photuris +) + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +var icmpTypes = map[ICMPType]string{ + 0: "echo reply", + 3: "destination unreachable", + 5: "redirect", + 8: "echo", + 9: "router advertisement", + 10: 
"router solicitation", + 11: "time exceeded", + 12: "parameter problem", + 13: "timestamp", + 14: "timestamp reply", + 40: "photuris", +} diff --git a/vendor/golang.org/x/net/ipv4/icmp.go b/vendor/golang.org/x/net/ipv4/icmp.go new file mode 100644 index 000000000..9902bb3d2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/iana" + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv4 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model and it works not +// only for IPv6 but IPv4. A node means a device that implements IP. +// A router means a node that forwards IP packets not explicitly +// addressed to itself, and a host means a node that is not a router. +type ICMPFilter struct { + icmpFilter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_linux.go b/vendor/golang.org/x/net/ipv4/icmp_linux.go new file mode 100644 index 000000000..6e1c5c80a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_linux.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +func (f *icmpFilter) accept(typ ICMPType) { + f.Data &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) block(typ ICMPType) { + f.Data |= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) setAll(block bool) { + if block { + f.Data = 1<<32 - 1 + } else { + f.Data = 0 + } +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return f.Data&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go new file mode 100644 index 000000000..21bb29ab3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !linux + +package ipv4 + +const sizeofICMPFilter = 0x0 + +type icmpFilter struct { +} + +func (f *icmpFilter) accept(typ ICMPType) { +} + +func (f *icmpFilter) block(typ ICMPType) { +} + +func (f *icmpFilter) setAll(block bool) { +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_test.go b/vendor/golang.org/x/net/ipv4/icmp_test.go new file mode 100644 index 000000000..3324b54df --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_test.go @@ -0,0 +1,95 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var icmpStringTests = []struct { + in ipv4.ICMPType + out string +}{ + {ipv4.ICMPTypeDestinationUnreachable, "destination unreachable"}, + + {256, ""}, +} + +func TestICMPString(t *testing.T) { + for _, tt := range icmpStringTests { + s := tt.in.String() + if s != tt.out { + t.Errorf("got %s; want %s", s, tt.out) + } + } +} + +func TestICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "linux": + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + var f ipv4.ICMPFilter + for _, toggle := range []bool{false, true} { + f.SetAll(toggle) + for _, typ := range []ipv4.ICMPType{ + ipv4.ICMPTypeDestinationUnreachable, + ipv4.ICMPTypeEchoReply, + ipv4.ICMPTypeTimeExceeded, + ipv4.ICMPTypeParameterProblem, + } { + f.Accept(typ) + if f.WillBlock(typ) { + t.Errorf("ipv4.ICMPFilter.Set(%v, false) failed", typ) + } + f.Block(typ) + if !f.WillBlock(typ) { + t.Errorf("ipv4.ICMPFilter.Set(%v, true) failed", typ) + } + } + } +} + +func TestSetICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "linux": + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + + var f ipv4.ICMPFilter + f.SetAll(true) + f.Accept(ipv4.ICMPTypeEcho) + f.Accept(ipv4.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + kf, err := p.ICMPFilter() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(kf, &f) { + t.Fatalf("got %#v; want %#v", kf, f) + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicast_test.go b/vendor/golang.org/x/net/ipv4/multicast_test.go new file mode 100644 index 000000000..bcf49736b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicast_test.go @@ -0,0 +1,334 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var packetConnReadWriteMulticastUDPTests = []struct { + addr string + grp, src *net.UDPAddr +}{ + {"224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {"232.0.1.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastUDPTests { + c, err := net.ListenPacket("udp4", tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + grp := *tt.grp + grp.Port = c.LocalAddr().(*net.UDPAddr).Port + p := ipv4.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, &grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, &grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + p.SetMulticastTTL(i + 1) + if n, err := p.WriteTo(wb, nil, &grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } + } +} + +var packetConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + defer p.Close() + 
if tt.src == nil { + if err := p.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, tt.grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagDst | ipv4.FlagInterface + if runtime.GOOS != "solaris" { + // Solaris never allows to modify ICMP properties. + cf |= ipv4.FlagTTL + } + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + p.SetMulticastTTL(i + 1) + if n, err := p.WriteTo(wb, nil, tt.grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + if err != nil { + t.Fatal(err) + } + switch { + case m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 + case m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 + default: + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } + } +} + +var rawConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestRawConnReadWriteMulticastICMP(t *testing.T) { + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range rawConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + if tt.src == nil { + if err := r.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer r.LeaveGroup(ifi, tt.grp) + } else { + if err := r.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer r.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := r.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := 
r.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := r.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := r.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + wh := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: i + 1, + TotalLen: ipv4.HeaderLen + len(wb), + Protocol: 1, + Dst: tt.grp.IP, + } + if err := r.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := r.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + r.SetMulticastTTL(i + 1) + if err := r.WriteTo(wh, wb, nil); err != nil { + t.Fatal(err) + } + rb := make([]byte, ipv4.HeaderLen+128) + if rh, b, _, err := r.ReadFrom(rb); err != nil { + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + switch { + case (rh.Dst.IsLoopback() || rh.Dst.IsLinkLocalUnicast() || rh.Dst.IsGlobalUnicast()) && m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 + case rh.Dst.IsMulticast() && m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 + default: + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicastlistener_test.go b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go new file mode 100644 index 000000000..e43fbbe08 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var udpMultipleGroupListenerTests = []net.Addr{ + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, // see RFC 4727 + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}, + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, +} + +func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c, err := net.ListenPacket("udp4", "0.0.0.0:0") // wildcard address with no reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } +} + +func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c1, err := net.ListenPacket("udp4", "224.0.0.0:0") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c1.Close() + _, port, err := net.SplitHostPort(c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + c2, err := net.ListenPacket("udp4", net.JoinHostPort("224.0.0.0", port)) // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + var ps [2]*ipv4.PacketConn + ps[0] = ipv4.NewPacketConn(c1) + ps[1] = ipv4.NewPacketConn(c2) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + for _, p := range ps { + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + for _, p := range ps { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } + } +} + +func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + type ml struct { + c *ipv4.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + port := "0" + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip4", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("udp4", net.JoinHostPort(ip.String(), port)) // unicast address with non-reusable port + if err != nil { + // The listen may fail when the service is + // already in use, but it's fine because the + // purpose of this is not to test the + // bookkeeping of IP control block inside the + // kernel.
+ t.Log(err) + continue + } + defer c.Close() + if port == "0" { + _, port, err = net.SplitHostPort(c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + } + p := ipv4.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPSingleRawConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // wildcard address + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + if err := r.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := r.LeaveGroup(ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPPerInterfaceSingleRawConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + type ml struct { + c *ipv4.RawConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip4", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("ip4:253", ip.String()) // unicast address + if err != nil { + t.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + if err := r.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{r, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go new file mode 100644 index 000000000..f7efac24c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go @@ -0,0 +1,195 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var packetConnMulticastSocketOptionTests = []struct { + net, proto, addr string + grp, src net.Addr +}{ + {"udp4", "", "224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, nil}, // see RFC 4727 + {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 + + {"udp4", "", "232.0.0.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 249)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 + {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnMulticastSocketOptionTests { + if tt.net == "ip4" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, p, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) + } + } +} + +var rawConnMulticastSocketOptionTests = []struct { + grp, src net.Addr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestRawConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range rawConnMulticastSocketOptionTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, r, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, r, ifi, tt.grp, tt.src) + } + } +} + +type testIPv4MulticastConn interface { + MulticastTTL() (int, error) + SetMulticastTTL(ttl int) error + MulticastLoopback() (bool, error) + SetMulticastLoopback(bool) error + JoinGroup(*net.Interface, net.Addr) error + LeaveGroup(*net.Interface, net.Addr) error + JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error +} + +func testMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp net.Addr) { + const ttl = 255 + if err := c.SetMulticastTTL(ttl); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastTTL(); err != nil { + t.Error(err) + return + } else if v != ttl { + t.Errorf("got %v; want %v", v, ttl) + return + } + + for _, toggle := range []bool{true, false} { + if err := c.SetMulticastLoopback(toggle); err != nil { + 
t.Error(err) + return + } + if v, err := c.MulticastLoopback(); err != nil { + t.Error(err) + return + } else if v != toggle { + t.Errorf("got %v; want %v", v, toggle) + return + } + } + + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} + +func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp, src net.Addr) { + // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + return + } + t.Error(err) + return + } + if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go new file mode 100644 index 000000000..f00f5b052 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -0,0 +1,69 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadFrom and WriteTo methods of RawConn +// are not implemented. + +// A packetHandler represents the IPv4 datagram handler. +type packetHandler struct { + *net.IPConn + *socket.Conn + rawOpt +} + +func (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn != nil } + +// ReadFrom reads an IPv4 datagram from the endpoint c, copying the +// datagram into b. It returns the received datagram as the IPv4 +// header h, the payload p and the control message cm. +func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + if !c.ok() { + return nil, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +func slicePacket(b []byte) (h, p []byte, err error) { + if len(b) < HeaderLen { + return nil, nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + return b[:hdrlen], b[hdrlen:], nil +} + +// WriteTo writes an IPv4 datagram through the endpoint c, copying the +// datagram from the IPv4 header h and the payload p. The control +// message cm allows the datagram path and the outgoing interface to be +// specified. Currently only Darwin and Linux support this. The cm +// may be nil if control of the outgoing datagram is not required. 
+// +// The IPv4 header h must contain appropriate fields that include: +// +// Version = +// Len = +// TOS = +// TotalLen = +// ID = platform sets an appropriate value if ID is zero +// FragOff = +// TTL = +// Protocol = +// Checksum = platform sets an appropriate value if Checksum is zero +// Src = platform sets an appropriate value if Src is nil +// Dst = +// Options = optional +func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { + if !c.ok() { + return syscall.EINVAL + } + return c.writeTo(h, p, cm) +} diff --git a/vendor/golang.org/x/net/ipv4/packet_go1_8.go b/vendor/golang.org/x/net/ipv4/packet_go1_8.go new file mode 100644 index 000000000..b47d18683 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet_go1_8.go @@ -0,0 +1,56 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv4 + +import "net" + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + n, nn, _, src, err := c.ReadMsgIP(b, oob) + if err != nil { + return nil, nil, nil, err + } + var hs []byte + if hs, p, err = slicePacket(b[:n]); err != nil { + return nil, nil, nil, err + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, err + } + if nn > 0 { + cm = new(ControlMessage) + if err := cm.Parse(oob[:nn]); err != nil { + return nil, nil, nil, err + } + } + if src != nil && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + oob := cm.Marshal() + wh, err := h.Marshal() + if err != nil { + return err + } + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + wh = append(wh, p...) + _, _, err = c.WriteMsgIP(wh, oob, dst) + return err +} diff --git a/vendor/golang.org/x/net/ipv4/packet_go1_9.go b/vendor/golang.org/x/net/ipv4/packet_go1_9.go new file mode 100644 index 000000000..082c36d73 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
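(Editor's sketch, not part of the vendored patch: the field requirements listed in the WriteTo documentation above, put into practice with a RawConn. The loopback address and protocol number 253, an experimentation value also used by the tests in this diff, are illustrative, and raw sockets require elevated privileges.)

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	// ip4:253 opens a raw socket for IP protocol 253.
	c, err := net.ListenPacket("ip4:253", "127.0.0.1")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	r, err := ipv4.NewRawConn(c)
	if err != nil {
		log.Fatal(err)
	}

	payload := []byte("HELLO-R-U-THERE")
	h := &ipv4.Header{
		Version:  ipv4.Version,
		Len:      ipv4.HeaderLen,
		TotalLen: ipv4.HeaderLen + len(payload),
		TTL:      1,
		Protocol: 253,
		Dst:      net.ParseIP("127.0.0.1").To4(),
		// ID, Checksum and Src are left zero/nil: as documented above,
		// the platform fills in appropriate values.
	}
	if err := r.WriteTo(h, payload, nil); err != nil {
		log.Fatal(err)
	}
}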
+ +// +build go1.9 + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + if err := c.RecvMsg(&m, 0); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + var hs []byte + if hs, p, err = slicePacket(b[:m.N]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + } + if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + m := socket.Message{ + OOB: cm.Marshal(), + } + wh, err := h.Marshal() + if err != nil { + return err + } + m.Buffers = [][]byte{wh, p} + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + m.Addr = dst + if err := c.SendMsg(&m, 0); err != nil { + return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/payload.go b/vendor/golang.org/x/net/ipv4/payload.go new file mode 100644 index 000000000..f95f811ac --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv4 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go new file mode 100644 index 000000000..3f06d7606 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl,!plan9,!windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. 
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.writeTo(b, cm, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go new file mode 100644 index 000000000..d26ccd90c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go @@ -0,0 +1,59 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + nb := make([]byte, maxHeaderLen+len(b)) + if n, nn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { + return 0, nil, nil, err + } + hdrlen := int(nb[0]&0x0f) << 2 + copy(b, nb[hdrlen:]) + n -= hdrlen + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP4(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go new file mode 100644 index 000000000..2f1931183 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
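(Editor's sketch, not part of the vendored patch: the payload ReadFrom/WriteTo path above is what (*ipv4.PacketConn).ReadFrom exposes; a minimal receive over UDP might look like this, with the listen address purely illustrative.)

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0") // illustrative local listener
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv4.NewPacketConn(c)
	// Ask the kernel to report TTL and destination address per datagram;
	// not every platform supports every flag.
	if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagDst, true); err != nil {
		log.Println(err)
	}

	b := make([]byte, 1500)
	n, cm, src, err := p.ReadFrom(b) // blocks until a datagram arrives; payload only, IPv4 header stripped
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d bytes from %v, control message: %v", n, src, cm)
}

SetControlMessage is what arms the cflags that readFrom above consults through c.rawOpt before building the control-message buffer.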
+ +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + m.Buffers = [][]byte{b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + h := make([]byte, HeaderLen) + m.Buffers = [][]byte{h, b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + hdrlen := int(h[0]&0x0f) << 2 + if hdrlen > len(h) { + d := hdrlen - len(h) + copy(b, b[d:]) + m.N -= d + } else { + m.N -= hdrlen + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP4(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go new file mode 100644 index 000000000..3926de70b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. 
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go b/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go new file mode 100644 index 000000000..1cd926e7f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go @@ -0,0 +1,248 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv4_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + b.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func 
TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + t.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr()) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr()) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr) { + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go b/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go new file mode 100644 index 000000000..365de022a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go @@ -0,0 +1,388 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 + +package ipv4_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + b.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv4.Message{ + { + Buffers: [][]byte{payload}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv4.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv4.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv4.Message{ + { + Buffers: [][]byte{datagram}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv4.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv4.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err 
:= p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + t.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr, batch bool) { + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + batchReader := func() { + defer wg.Done() + ms := []ipv4.Message{ + { + Buffers: [][]byte{make([]byte, 128)}, + OOB: ipv4.NewControlMessage(cf), + }, + } + n, err := p.ReadBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + var cm ipv4.ControlMessage + if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil { + t.Error(err) + return + } + var b []byte + if _, ok := dst.(*net.IPAddr); ok { + var h ipv4.Header + if err := h.Parse(ms[0].Buffers[0][:ms[0].N]); err != nil { + t.Error(err) + return + } + b = ms[0].Buffers[0][h.Len:ms[0].N] + } else { + b = ms[0].Buffers[0][:ms[0].N] + } + if !bytes.Equal(b, data) { + t.Errorf("got %#v; want %#v", b, data) + return + } + s := cm.String() + if 
strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + batchWriter := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + ms := []ipv4.Message{ + { + Buffers: [][]byte{data}, + OOB: cm.Marshal(), + Addr: dst, + }, + } + n, err := p.WriteBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + if ms[0].N != len(data) { + t.Errorf("got %d; want %d", ms[0].N, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + if batch { + go batchWriter(i%2 != 0) + } else { + go writer(i%2 != 0) + } + + } + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_test.go b/vendor/golang.org/x/net/ipv4/readwrite_test.go new file mode 100644 index 000000000..3896a8ae4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_test.go @@ -0,0 +1,140 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "bytes" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkReadWriteUnicast(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + + dst := c.LocalAddr() + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + + b.Run("NetUDP", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("IPv4UDP", func(b *testing.B) { + p := ipv4.NewPacketConn(c) + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + cm := ipv4.ControlMessage{TTL: 1} + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) +} + +func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + rb := make([]byte, 128) + if n, cm, _, err := p.ReadFrom(rb); err != nil { + t.Error(err) + return + } else if !bytes.Equal(rb[:n], wb) { + t.Errorf("got %v; want %v", rb[:n], wb) + return + } else { + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + } + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Error(err) + return + } else if n != len(wb) { + t.Errorf("got %d; want %d", n, len(wb)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt.go b/vendor/golang.org/x/net/ipv4/sockopt.go new file mode 100644 index 000000000..22e90c039 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt.go @@ -0,0 +1,44 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import "golang.org/x/net/internal/socket" + +// Sticky socket options +const ( + ssoTOS = iota // header field for unicast packet + ssoTTL // header field for unicast packet + ssoMulticastTTL // header field for multicast packet + ssoMulticastInterface // outbound interface for multicast packet + ssoMulticastLoopback // loopback for multicast packet + ssoReceiveTTL // header field on received packet + ssoReceiveDst // header field on received packet + ssoReceiveInterface // inbound interface on received packet + ssoPacketInfo // incbound or outbound packet path + ssoHeaderPrepend // ipv4 header prepend + ssoStripHeader // strip ipv4 header + ssoICMPFilter // icmp filter + ssoJoinGroup // any-source multicast + ssoLeaveGroup // any-source multicast + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoAttachFilter // attach BPF for filtering inbound traffic +) + +// Sticky socket option value types +const ( + ssoTypeIPMreq = iota + 1 + ssoTypeIPMreqn + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky socket option. +type sockOpt struct { + socket.Option + typ int // hint for option value type; optional +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go new file mode 100644 index 000000000..e96955bc1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -0,0 +1,71 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + switch so.typ { + case ssoTypeIPMreqn: + return so.getIPMreqn(c) + default: + return so.getMulticastIf(c) + } +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + switch so.typ { + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, nil) + default: + return so.setMulticastIf(c, ifi) + } +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPFilter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPFilter]byte)(unsafe.Pointer(f))[:sizeofICMPFilter] + return so.Set(c, b) +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go new file mode 100644 index 
000000000..23249b782 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/vendor/golang.org/x/net/ipv4/sys_asmreq.go new file mode 100644 index 000000000..0388cba00 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -0,0 +1,119 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + mreq := ipMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + b := (*[sizeofIPMreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPMreq] + return so.Set(c, b) +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + var b [4]byte + if _, err := so.Get(c, b[:]); err != nil { + return nil, err + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return so.Set(c, b[:]) +} + +func setIPMreqInterface(mreq *ipMreq, ifi *net.Interface) error { + if ifi == nil { + return nil + } + ifat, err := ifi.Addrs() + if err != nil { + return err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + } + } + return errNoSuchInterface +} + +func netIP4ToInterface(ip net.IP) (*net.Interface, error) { + ift, err := net.Interfaces() + if err != nil { + return nil, err + } + for _, ifi := range ift { + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + case *net.IPNet: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + } + } + } + return nil, errNoSuchInterface +} + 
+func netInterfaceToIP4(ifi *net.Interface) (net.IP, error) { + if ifi == nil { + return net.IPv4zero.To4(), nil + } + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + } + } + return nil, errNoSuchInterface +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go new file mode 100644 index 000000000..f3919208b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go new file mode 100644 index 000000000..1f24f69f3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + b := make([]byte, so.Len) + if _, err := so.Get(c, b); err != nil { + return nil, err + } + mreqn := (*ipMreqn)(unsafe.Pointer(&b[0])) + if mreqn.Ifindex == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(mreqn.Ifindex)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreqn ipMreqn + if ifi != nil { + mreqn.Ifindex = int32(ifi.Index) + } + if grp != nil { + mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]} + } + b := (*[sizeofIPMreqn]byte)(unsafe.Pointer(&mreqn))[:sizeofIPMreqn] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go new file mode 100644 index 000000000..0711d3d78 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!freebsd,!linux + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf.go b/vendor/golang.org/x/net/ipv4/sys_bpf.go new file mode 100644 index 000000000..9f30b7308 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go new file mode 100644 index 000000000..9a2132093 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go new file mode 100644 index 000000000..58256dd9d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_darwin.go b/vendor/golang.org/x/net/ipv4/sys_darwin.go new file mode 100644 index 000000000..e8fb19169 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_darwin.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "strconv" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoStripHeader: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_STRIPHDR, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. + // See http://support.apple.com/kb/HT1633. 
+ if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return + } + ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO + ctlOpts[ctlPacketInfo].length = sizeofInetPktinfo + ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo + ctlOpts[ctlPacketInfo].parse = parsePacketInfo + sockOpts[ssoPacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}} + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} +} + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_dragonfly.go b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go new file mode 100644 index 000000000..859764f33 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/golang.org/x/net/ipv4/sys_freebsd.go new file mode 100644 index 000000000..b80032454 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") + if freebsdVersion >= 1000000 { + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + } + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_linux.go b/vendor/golang.org/x/net/ipv4/sys_linux.go new file mode 100644 index 000000000..60defe132 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_linux.go @@ -0,0 +1,59 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_PKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysICMP_FILTER, Len: sizeofICMPFilter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_solaris.go b/vendor/golang.org/x/net/ipv4/sys_solaris.go new file mode 100644 index 000000000..832fef1e2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 4, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go new file mode 100644 index 000000000..ae5704e77 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin freebsd linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go new file mode 100644 index 000000000..e6b7623d0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux,!solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go new file mode 100644 index 000000000..4f076473b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv4/sys_windows.go b/vendor/golang.org/x/net/ipv4/sys_windows.go new file mode 100644 index 000000000..b0913d539 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_windows.go @@ -0,0 +1,67 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. 
+ sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_DONTFRAGMENT = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0xf + sysIP_DROP_SOURCE_MEMBERSHIP = 0x10 + sysIP_PKTINFO = 0x13 + + sizeofInetPktinfo = 0x8 + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc +) + +type inetPktinfo struct { + Addr [4]byte + Ifindex int32 +} + +type ipMreq struct { + Multiaddr [4]byte + Interface [4]byte +} + +type ipMreqSource struct { + Multiaddr [4]byte + Sourceaddr [4]byte + Interface [4]byte +} + +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} diff --git a/vendor/golang.org/x/net/ipv4/unicast_test.go b/vendor/golang.org/x/net/ipv4/unicast_test.go new file mode 100644 index 000000000..02c089f00 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/unicast_test.go @@ -0,0 +1,247 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func TestPacketConnReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + p.SetTTL(i + 1) + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, nil, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } +} + +func TestPacketConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + p := ipv4.NewPacketConn(c) + defer p.Close() + cf := ipv4.FlagDst | ipv4.FlagInterface + if runtime.GOOS != "solaris" { + // Solaris never allows to modify ICMP properties. 
+ cf |= ipv4.FlagTTL + } + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + p.SetTTL(i + 1) + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, nil, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + loop: + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + if err != nil { + t.Fatal(err) + } + if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { + // On Linux we must handle own sent packets. + goto loop + } + if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } +} + +func TestRawConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + wh := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: i + 1, + TotalLen: ipv4.HeaderLen + len(wb), + TTL: i + 1, + Protocol: 1, + Dst: dst.IP, + } + if err := r.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := r.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if err := r.WriteTo(wh, wb, nil); err != nil { + t.Fatal(err) + } + rb := make([]byte, ipv4.HeaderLen+128) + loop: + if err := r.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if _, b, _, err := r.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } 
+ if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { + // On Linux we must handle own sent packets. + goto loop + } + if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go new file mode 100644 index 000000000..db5213b91 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go @@ -0,0 +1,148 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func TestConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + errc := make(chan error, 1) + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + errc <- c.Close() + }() + + c, err := net.Dial("tcp4", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv4.NewConn(c)) + + if err := <-errc; err != nil { + t.Errorf("server: %v", err) + } +} + +var packetConnUnicastSocketOptionTests = []struct { + net, proto, addr string +}{ + {"udp4", "", "127.0.0.1:0"}, + {"ip4", ":icmp", "127.0.0.1"}, +} + +func TestPacketConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnUnicastSocketOptionTests { + if tt.net == "ip4" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv4.NewPacketConn(c)) + } +} + +func TestRawConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + + testUnicastSocketOptions(t, r) +} + +type testIPv4UnicastConn interface { + TOS() (int, error) + SetTOS(int) error + TTL() (int, error) + SetTTL(int) error +} + +func testUnicastSocketOptions(t *testing.T, c testIPv4UnicastConn) { + tos := iana.DiffServCS0 | iana.NotECNTransport + switch runtime.GOOS { + case "windows": + // IP_TOS option is supported on Windows 8 and beyond. 
+ t.Skipf("not supported on %s", runtime.GOOS) + } + + if err := c.SetTOS(tos); err != nil { + t.Fatal(err) + } + if v, err := c.TOS(); err != nil { + t.Fatal(err) + } else if v != tos { + t.Fatalf("got %v; want %v", v, tos) + } + const ttl = 255 + if err := c.SetTTL(ttl); err != nil { + t.Fatal(err) + } + if v, err := c.TTL(); err != nil { + t.Fatal(err) + } else if v != ttl { + t.Fatalf("got %v; want %v", v, ttl) + } +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/golang.org/x/net/ipv4/zsys_darwin.go new file mode 100644 index 000000000..c07cc883f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -0,0 +1,99 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_STRIPHDR = 0x17 + sysIP_RECVTTL = 0x18 + sysIP_BOUND_IF = 0x19 + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_MULTICAST_IFINDEX = 0x42 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go new file mode 100644 index 000000000..c4365e9e7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -0,0 +1,31 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x41 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} 
diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go new file mode 100644 index 000000000..8c4aec94c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go new file mode 100644 index 000000000..4b10b7c57 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + 
sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go new file mode 100644 index 000000000..4b10b7c57 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go new file mode 100644 index 000000000..c0260f0ce --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + 
sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go new file mode 100644 index 000000000..9c967eaa6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + 
sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go new file mode 100644 index 000000000..c0260f0ce --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + 
sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go new file mode 100644 index 000000000..9c967eaa6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + 
sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go new file mode 100644 index 000000000..c0260f0ce --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* 
in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go new file mode 100644 index 000000000..9c967eaa6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ 
+ Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go new file mode 100644 index 000000000..9c967eaa6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type 
icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go new file mode 100644 index 000000000..c0260f0ce --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go new file mode 100644 index 000000000..f65bd9a7a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ 
-0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go new file mode 100644 index 000000000..9c967eaa6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + 
sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go new file mode 100644 index 000000000..9c967eaa6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + 
sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go new file mode 100644 index 000000000..9c967eaa6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + 
sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go new file mode 100644 index 000000000..fd3624d93 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x17 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go new file mode 100644 index 000000000..12f36be75 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x1e + sysIP_RECVTTL = 0x1f + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/golang.org/x/net/ipv4/zsys_solaris.go new file mode 100644 index 000000000..0a3875cc4 --- /dev/null +++ 
b/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -0,0 +1,100 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x9 + sysIP_RECVSLLA = 0xa + sysIP_RECVTTL = 0xb + + sysIP_MULTICAST_IF = 0x10 + sysIP_MULTICAST_TTL = 0x11 + sysIP_MULTICAST_LOOP = 0x12 + sysIP_ADD_MEMBERSHIP = 0x13 + sysIP_DROP_MEMBERSHIP = 0x14 + sysIP_BLOCK_SOURCE = 0x15 + sysIP_UNBLOCK_SOURCE = 0x16 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x17 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x18 + sysIP_NEXTHOP = 0x19 + + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + sysIP_DONTFRAG = 0x1b + + sysIP_BOUND_IF = 0x41 + sysIP_UNSPEC_SRC = 0x42 + sysIP_BROADCAST_TTL = 0x43 + sysIP_DHCPINIT_IF = 0x45 + + sysIP_REUSEADDR = 0x104 + sysIP_DONTROUTE = 0x105 + sysIP_BROADCAST = 0x106 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} diff --git a/vendor/golang.org/x/net/ipv6/batch.go b/vendor/golang.org/x/net/ipv6/batch.go new file mode 100644 index 000000000..4f5fe683d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/batch.go @@ -0,0 +1,119 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv6 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. 
+// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv6/bpf_test.go b/vendor/golang.org/x/net/ipv6/bpf_test.go new file mode 100644 index 000000000..8253e1f42 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/bpf_test.go @@ -0,0 +1,96 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv6" +) + +func TestBPF(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + l, err := net.ListenPacket("udp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + p := ipv6.NewPacketConn(l) + + // This filter accepts UDP packets whose first payload byte is + // even. + prog, err := bpf.Assemble([]bpf.Instruction{ + // Load the first byte of the payload (skipping UDP header). + bpf.LoadAbsolute{Off: 8, Size: 1}, + // Select LSB of the byte. + bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, + // Byte is even? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, + // Accept. + bpf.RetConstant{Val: 4096}, + // Ignore. 
+ bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("compiling BPF: %s", err) + } + + if err = p.SetBPF(prog); err != nil { + t.Fatalf("attaching filter to Conn: %s", err) + } + + s, err := net.Dial("udp6", l.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer s.Close() + go func() { + for i := byte(0); i < 10; i++ { + s.Write([]byte{i}) + } + }() + + l.SetDeadline(time.Now().Add(2 * time.Second)) + seen := make([]bool, 5) + for { + var b [512]byte + n, _, err := l.ReadFrom(b[:]) + if err != nil { + t.Fatalf("reading from listener: %s", err) + } + if n != 1 { + t.Fatalf("unexpected packet length, want 1, got %d", n) + } + if b[0] >= 10 { + t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) + } + if b[0]%2 != 0 { + t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) + } + seen[b[0]/2] = true + + seenAll := true + for _, v := range seen { + if !v { + seenAll = false + break + } + } + if seenAll { + break + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/control.go b/vendor/golang.org/x/net/ipv6/control.go new file mode 100644 index 000000000..2da644413 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +// Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the +// former still support RFC 2292 only. Please be aware that almost +// all protocol implementations prohibit using a combination of RFC +// 2292 and RFC 3542 for some practical reasons. + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +// A ControlFlags represents per packet basis IP-level socket option +// control flags. +type ControlFlags uint + +const ( + FlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet + FlagHopLimit // pass the hop limit on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet + FlagPathMTU // pass the path MTU on the received packet path +) + +const flagPacketInfo = FlagDst | FlagInterface + +// A ControlMessage represents per packet basis IP-level socket +// options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn allows to send the options to the + // protocol stack. 
+ // + TrafficClass int // traffic class, must be 1 <= value <= 255 when specifying + HopLimit int // hop limit, must be 1 <= value <= 255 when specifying + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying + NextHop net.IP // next hop address, specifying only + MTU int // path MTU, receiving only +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU) +} + +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var l int + tclass := false + if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { + tclass = true + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + hoplimit := false + if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { + hoplimit = true + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { + pktinfo = true + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + nexthop := false + if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { + nexthop = true + l += socket.ControlMessageSpace(ctlOpts[ctlNextHop].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + bb := b + if tclass { + bb = ctlOpts[ctlTrafficClass].marshal(bb, cm) + } + if hoplimit { + bb = ctlOpts[ctlHopLimit].marshal(bb, cm) + } + if pktinfo { + bb = ctlOpts[ctlPacketInfo].marshal(bb, cm) + } + if nexthop { + bb = ctlOpts[ctlNextHop].marshal(bb, cm) + } + } + return b +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIPv6 { + continue + } + switch { + case typ == ctlOpts[ctlTrafficClass].name && l >= ctlOpts[ctlTrafficClass].length: + ctlOpts[ctlTrafficClass].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlHopLimit].name && l >= ctlOpts[ctlHopLimit].length: + ctlOpts[ctlHopLimit].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPathMTU].name && l >= ctlOpts[ctlPathMTU].length: + ctlOpts[ctlPathMTU].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. 
+func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPathMTU].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTrafficClass = iota // header field + ctlHopLimit // header field + ctlPacketInfo // inbound or outbound packet path + ctlNextHop // nexthop + ctlPathMTU // path mtu + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go new file mode 100644 index 000000000..9fd9eb15e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func marshal2292NextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go new file mode 100644 index 000000000..eec529c20 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -0,0 +1,94 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_TCLASS, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.TrafficClass)) + } + return m.Next(4) +} + +func parseTrafficClass(cm *ControlMessage, b []byte) { + cm.TrafficClass = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalHopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func parseHopLimit(cm *ControlMessage, b []byte) { + cm.HopLimit = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inet6Pktinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, pi.Addr[:]) + cm.IfIndex = int(pi.Ifindex) +} + +func marshalNextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} + +func parseNextHop(cm *ControlMessage, b []byte) { +} + +func marshalPathMTU(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PATHMTU, sizeofIPv6Mtuinfo) + return m.Next(sizeofIPv6Mtuinfo) +} + +func parsePathMTU(cm *ControlMessage, b []byte) { + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, mi.Addr.Addr[:]) + cm.IfIndex = int(mi.Addr.Scope_id) + cm.MTU = int(mi.Mtu) +} diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go new file mode 100644 index 000000000..a045f28f7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/control_test.go b/vendor/golang.org/x/net/ipv6/control_test.go new file mode 100644 index 000000000..c186ca99f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_test.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "testing" + + "golang.org/x/net/ipv6" +) + +func TestControlMessageParseWithFuzz(t *testing.T) { + var cm ipv6.ControlMessage + for _, fuzz := range []string{ + "\f\x00\x00\x00)\x00\x00\x00.\x00\x00\x00", + "\f\x00\x00\x00)\x00\x00\x00,\x00\x00\x00", + } { + cm.Parse([]byte(fuzz)) + } +} diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go new file mode 100644 index 000000000..66515060a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTrafficClass]; ok && cf&FlagTrafficClass != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTrafficClass) + } else { + opt.clear(FlagTrafficClass) + } + } + if so, ok := sockOpts[ssoReceiveHopLimit]; ok && cf&FlagHopLimit != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagHopLimit) + } else { + opt.clear(FlagHopLimit) + } + } + if so, ok := sockOpts[ssoReceivePacketInfo]; ok && cf&flagPacketInfo != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & flagPacketInfo) + } else { + opt.clear(cf & flagPacketInfo) + } + } + if so, ok := sockOpts[ssoReceivePathMTU]; ok && cf&FlagPathMTU != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagPathMTU) + } else { + opt.clear(FlagPathMTU) + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/golang.org/x/net/ipv6/control_windows.go new file mode 100644 index 000000000..ef2563b3f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_windows.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "syscall" + + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} diff --git a/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/golang.org/x/net/ipv6/defs_darwin.go new file mode 100644 index 000000000..55ddc116f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_darwin.go @@ -0,0 +1,112 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#define __APPLE_USE_RFC_3542 +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go new file mode 100644 index 000000000..a4c383a51 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go @@ -0,0 +1,84 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/golang.org/x/net/ipv6/defs_freebsd.go new file mode 100644 index 000000000..53e625389 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_freebsd.go @@ -0,0 +1,105 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_BINDANY = C.IPV6_BINDANY + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/golang.org/x/net/ipv6/defs_linux.go new file mode 100644 index 000000000..3308cb2c3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_linux.go @@ -0,0 +1,147 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + sysIPV6_ADDRFORM = C.IPV6_ADDRFORM + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_FLOWINFO = C.IPV6_FLOWINFO + + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP + sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT + sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER + sysIPV6_MTU = C.IPV6_MTU + sysIPV6_RECVERR = C.IPV6_RECVERR + sysIPV6_V6ONLY = C.IPV6_V6ONLY + sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST + sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST + + //sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT + //sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT + //sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO + //sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE + //sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE + //sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT + + sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR + sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES + + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + + sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT + + sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR + sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR + sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT + sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF + + sysICMPV6_FILTER = C.ICMPV6_FILTER + + sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK + sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS + sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS + sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY + + sysSOL_SOCKET = 
C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER + + sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + + sizeofSockFprog = C.sizeof_struct_sock_fprog +) + +type kernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6FlowlabelReq C.struct_in6_flowlabel_req + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter + +type sockFProg C.struct_sock_fprog + +type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/golang.org/x/net/ipv6/defs_netbsd.go new file mode 100644 index 000000000..be9ceb9cc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_netbsd.go @@ -0,0 +1,80 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/vendor/golang.org/x/net/ipv6/defs_openbsd.go new 
file mode 100644 index 000000000..177ddf87d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_openbsd.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL + sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL + sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL + sysIPSEC6_OUTSA = C.IPSEC6_OUTSA + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_PIPEX = C.IPV6_PIPEX + + sysIPV6_RTABLE = C.IPV6_RTABLE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/golang.org/x/net/ipv6/defs_solaris.go new file mode 100644 index 000000000..0f8ce2b46 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_solaris.go @@ -0,0 +1,114 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + + sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_SEC_OPT = C.IPV6_SEC_OPT + sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + + sysIPV6_PREFER_SRC_MIPMASK = C.IPV6_PREFER_SRC_MIPMASK + sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT + sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK + sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT + sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK + sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT + + sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK + + sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC + + sysICMP6_FILTER = C.ICMP6_FILTER + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go new file mode 100644 index 000000000..703dafe84 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -0,0 +1,302 @@ +// 
Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/bpf" +) + +// MulticastHopLimit returns the hop limit field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastHopLimit() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetMulticastHopLimit sets the hop limit field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, hoplim) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errOpNoSupport + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errOpNoSupport + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errOpNoSupport + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. 
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// Checksum reports whether the kernel will compute, store or verify a +// checksum for both incoming and outgoing packets. If on is true, it +// returns an offset in bytes into the data of where the checksum +// field is located. +func (c *dgramOpt) Checksum() (on bool, offset int, err error) { + if !c.ok() { + return false, 0, syscall.EINVAL + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return false, 0, errOpNoSupport + } + offset, err = so.GetInt(c.Conn) + if err != nil { + return false, 0, err + } + if offset < 0 { + return false, 0, nil + } + return true, offset, nil +} + +// SetChecksum enables the kernel checksum processing. If on is ture, +// the offset should be an offset in bytes into the data of where the +// checksum field is located. 
+func (c *dgramOpt) SetChecksum(on bool, offset int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return errOpNoSupport + } + if !on { + offset = -1 + } + return so.SetInt(c.Conn, offset) +} + +// ICMPFilter returns an ICMP filter. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go new file mode 100644 index 000000000..664a97dea --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -0,0 +1,243 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv6 implements IP-level socket options for the Internet +// Protocol version 6. +// +// The package provides IP-level socket options that allow +// manipulation of IPv6 facilities. +// +// The IPv6 protocol is defined in RFC 8200. +// Socket interface extensions are defined in RFC 3493, RFC 3542 and +// RFC 3678. +// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810. +// Source-specific multicast is defined in RFC 4607. +// +// On Darwin, this package requires OS X Mavericks version 10.9 or +// above, or equivalent. +// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv6 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the traffic class field on the IPv6 +// header for each packet. +// +// ln, err := net.Listen("tcp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPconn which are created as network connections that use the +// IPv6 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. 
+// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.ParseIP("ff02::114") +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv6 and Ethernet. +// +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, rcm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if rcm.Dst.IsMulticast() { +// if rcm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. +// +// p.SetTrafficClass(0x0) +// p.SetHopLimit(16) +// if _, err := p.WriteTo(data[:n], nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1} +// for _, ifi := range []*net.Interface{en0, en1} { +// wcm.IfIndex = ifi.Index +// if _, err := p.WriteTo(data[:n], &wcm, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn may join multiple multicast +// groups. For example, a UDP listener with port 1024 might join two +// different groups across over two different network interfaces by +// using: +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. 
+// +// c1, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv6.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// p2 := ipv6.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff01::114")}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn on MLDv2 supported platform is +// able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.ParseIP("ff32::8000:9")} +// ssmsource := net.UDPAddr{IP: net.ParseIP("fe80::cafe")} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. +// +// exclsource := net.UDPAddr{IP: net.ParseIP("fe80::dead")} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on MLDv2 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// MLDv1 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv6 // import "golang.org/x/net/ipv6" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go new file mode 100644 index 000000000..0624c1740 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -0,0 +1,128 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn are not +// implemented. + +// A Conn represents a network endpoint that uses IPv6 transport. +// It allows to set basic IP-level socket options such as traffic +// class and hop limit. 
+type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// PathMTU returns a path MTU value for the destination associated +// with the endpoint. +func (c *Conn) PathMTU() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoPathMTU] + if !ok { + return 0, errOpNoSupport + } + _, mtu, err := so.getMTUInfo(c.Conn) + if err != nil { + return 0, err + } + return mtu, nil +} + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses IPv6 +// transport. It is used to control several IP-level socket options +// including IPv6 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv6 and higher layer +// protocols such as OSPF, GRE, and UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage allows to receive the per packet basis IP-level +// socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + return &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } +} diff --git a/vendor/golang.org/x/net/ipv6/example_test.go b/vendor/golang.org/x/net/ipv6/example_test.go new file mode 100644 index 000000000..e761aa2a1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/example_test.go @@ -0,0 +1,216 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "fmt" + "log" + "net" + "os" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" +) + +func ExampleConn_markingTCP() { + ln, err := net.Listen("tcp", "[::]:1024") + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + for { + c, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + if c.RemoteAddr().(*net.TCPAddr).IP.To16() != nil && c.RemoteAddr().(*net.TCPAddr).IP.To4() == nil { + p := ipv6.NewConn(c) + if err := p.SetTrafficClass(0x28); err != nil { // DSCP AF11 + log.Fatal(err) + } + if err := p.SetHopLimit(128); err != nil { + log.Fatal(err) + } + } + if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { + log.Fatal(err) + } + }(c) + } +} + +func ExamplePacketConn_servingOneShotMulticastDNS() { + c, err := net.ListenPacket("udp6", "[::]:5353") // mDNS over UDP + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + mDNSLinkLocal := net.UDPAddr{IP: net.ParseIP("ff02::fb")} + if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &mDNSLinkLocal) + if err := p.SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { + log.Fatal(err) + } + + var wcm ipv6.ControlMessage + b := make([]byte, 1500) + for { + _, rcm, peer, err := p.ReadFrom(b) + if err != nil { + log.Fatal(err) + } + if !rcm.Dst.IsMulticast() || !rcm.Dst.Equal(mDNSLinkLocal.IP) { + continue + } + wcm.IfIndex = rcm.IfIndex + answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this + if _, err := p.WriteTo(answers, &wcm, peer); err != nil { + log.Fatal(err) + } + } +} + +func ExamplePacketConn_tracingIPPacketRoute() { + // Tracing an IP packet route to www.google.com. + + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + log.Fatal(err) + } + var dst net.IPAddr + for _, ip := range ips { + if ip.To16() != nil && ip.To4() == nil { + dst.IP = ip + fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) + break + } + } + if dst.IP == nil { + log.Fatal("no AAAA record found") + } + + c, err := net.ListenPacket("ip6:58", "::") // ICMP for IPv6 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagSrc|ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { + log.Fatal(err) + } + wm := icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + } + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + log.Fatal(err) + } + + var wcm ipv6.ControlMessage + rb := make([]byte, 1500) + for i := 1; i <= 64; i++ { // up to 64 hops + wm.Body.(*icmp.Echo).Seq = i + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + + // In the real world usually there are several + // multiple traffic-engineered paths for each hop. + // You may need to probe a few times to each hop. 
+ begin := time.Now() + wcm.HopLimit = i + if _, err := p.WriteTo(wb, &wcm, &dst); err != nil { + log.Fatal(err) + } + if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + log.Fatal(err) + } + n, rcm, peer, err := p.ReadFrom(rb) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + fmt.Printf("%v\t*\n", i) + continue + } + log.Fatal(err) + } + rm, err := icmp.ParseMessage(58, rb[:n]) + if err != nil { + log.Fatal(err) + } + rtt := time.Since(begin) + + // In the real world you need to determine whether the + // received message is yours using ControlMessage.Src, + // ControlMesage.Dst, icmp.Echo.ID and icmp.Echo.Seq. + switch rm.Type { + case ipv6.ICMPTypeTimeExceeded: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm) + case ipv6.ICMPTypeEchoReply: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm) + return + } + } +} + +func ExamplePacketConn_advertisingOSPFHello() { + c, err := net.ListenPacket("ip6:89", "::") // OSPF for IPv6 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + allSPFRouters := net.IPAddr{IP: net.ParseIP("ff02::5")} + if err := p.JoinGroup(en0, &allSPFRouters); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &allSPFRouters) + + hello := make([]byte, 24) // fake hello data, you need to implement this + ospf := make([]byte, 16) // fake ospf header, you need to implement this + ospf[0] = 3 // version 3 + ospf[1] = 1 // hello packet + ospf = append(ospf, hello...) + if err := p.SetChecksum(true, 12); err != nil { + log.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: 0xc0, // DSCP CS6 + HopLimit: 1, + IfIndex: en0.Index, + } + if _, err := p.WriteTo(ospf, &cm, &allSPFRouters); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go new file mode 100644 index 000000000..47b7e9f0a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/gen.go @@ -0,0 +1,199 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. 
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", + parseICMPv6Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "package ipv6\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv6Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv6Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigName) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv6Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv6ParamRecord struct { + OrigName string + Name string + Value int +} + +func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr 
:= range icp.Registries[id].Records { + if strings.Contains(pr.Name, "Reserved") || + strings.Contains(pr.Name, "Unassigned") || + strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "Experiment") || + strings.Contains(pr.Name, "experiment") { + continue + } + ss := strings.Split(pr.Name, "\n") + if len(ss) > 1 { + prs[i].Name = strings.Join(ss, " ") + } else { + prs[i].Name = ss[0] + } + s := strings.TrimSpace(prs[i].Name) + prs[i].OrigName = s + prs[i].Name = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/ipv6/genericopt.go b/vendor/golang.org/x/net/ipv6/genericopt.go new file mode 100644 index 000000000..e9dbc2e18 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/genericopt.go @@ -0,0 +1,58 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "syscall" + +// TrafficClass returns the traffic class field value for outgoing +// packets. +func (c *genericOpt) TrafficClass() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTrafficClass sets the traffic class field value for future +// outgoing packets. +func (c *genericOpt) SetTrafficClass(tclass int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, tclass) +} + +// HopLimit returns the hop limit field value for outgoing packets. +func (c *genericOpt) HopLimit() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetHopLimit sets the hop limit field value for future outgoing +// packets. +func (c *genericOpt) SetHopLimit(hoplim int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, hoplim) +} diff --git a/vendor/golang.org/x/net/ipv6/header.go b/vendor/golang.org/x/net/ipv6/header.go new file mode 100644 index 000000000..e05cb08b2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "encoding/binary" + "fmt" + "net" +) + +const ( + Version = 6 // protocol version + HeaderLen = 40 // header length +) + +// A Header represents an IPv6 base header. +type Header struct { + Version int // protocol version + TrafficClass int // traffic class + FlowLabel int // flow label + PayloadLen int // payload length + NextHeader int // next header + HopLimit int // hop limit + Src net.IP // source address + Dst net.IP // destination address +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst) +} + +// ParseHeader parses b as an IPv6 base header. 
+func ParseHeader(b []byte) (*Header, error) { + if len(b) < HeaderLen { + return nil, errHeaderTooShort + } + h := &Header{ + Version: int(b[0]) >> 4, + TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4, + FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]), + PayloadLen: int(binary.BigEndian.Uint16(b[4:6])), + NextHeader: int(b[6]), + HopLimit: int(b[7]), + } + h.Src = make(net.IP, net.IPv6len) + copy(h.Src, b[8:24]) + h.Dst = make(net.IP, net.IPv6len) + copy(h.Dst, b[24:40]) + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv6/header_test.go b/vendor/golang.org/x/net/ipv6/header_test.go new file mode 100644 index 000000000..ca11dc23d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header_test.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "reflect" + "strings" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv6" +) + +var ( + wireHeaderFromKernel = [ipv6.HeaderLen]byte{ + 0x69, 0x8b, 0xee, 0xf1, + 0xca, 0xfe, 0x2c, 0x01, + 0x20, 0x01, 0x0d, 0xb8, + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, + 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + } + + testHeader = &ipv6.Header{ + Version: ipv6.Version, + TrafficClass: iana.DiffServAF43, + FlowLabel: 0xbeef1, + PayloadLen: 0xcafe, + NextHeader: iana.ProtocolIPv6Frag, + HopLimit: 1, + Src: net.ParseIP("2001:db8:1::1"), + Dst: net.ParseIP("2001:db8:2::1"), + } +) + +func TestParseHeader(t *testing.T) { + h, err := ipv6.ParseHeader(wireHeaderFromKernel[:]) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, testHeader) { + t.Fatalf("got %#v; want %#v", h, testHeader) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) + } +} diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go new file mode 100644 index 000000000..259740132 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "errors" + "net" +) + +var ( + errMissingAddress = errors.New("missing address") + errHeaderTooShort = errors.New("header too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP16(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go new file mode 100644 index 000000000..3c6214fb6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/iana.go @@ -0,0 +1,82 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package ipv6 + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +const ( + ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable + ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big + ICMPTypeTimeExceeded ICMPType = 3 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 4 // Parameter Problem + ICMPTypeEchoRequest ICMPType = 128 // Echo Request + ICMPTypeEchoReply ICMPType = 129 // Echo Reply + ICMPTypeMulticastListenerQuery ICMPType = 130 // Multicast Listener Query + ICMPTypeMulticastListenerReport ICMPType = 131 // Multicast Listener Report + ICMPTypeMulticastListenerDone ICMPType = 132 // Multicast Listener Done + ICMPTypeRouterSolicitation ICMPType = 133 // Router Solicitation + ICMPTypeRouterAdvertisement ICMPType = 134 // Router Advertisement + ICMPTypeNeighborSolicitation ICMPType = 135 // Neighbor Solicitation + ICMPTypeNeighborAdvertisement ICMPType = 136 // Neighbor Advertisement + ICMPTypeRedirect ICMPType = 137 // Redirect Message + ICMPTypeRouterRenumbering ICMPType = 138 // Router Renumbering + ICMPTypeNodeInformationQuery ICMPType = 139 // ICMP Node Information Query + ICMPTypeNodeInformationResponse ICMPType = 140 // ICMP Node Information Response + ICMPTypeInverseNeighborDiscoverySolicitation ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message + ICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message + ICMPTypeVersion2MulticastListenerReport ICMPType = 143 // Version 2 Multicast Listener Report + ICMPTypeHomeAgentAddressDiscoveryRequest ICMPType = 144 // Home Agent Address Discovery Request Message + ICMPTypeHomeAgentAddressDiscoveryReply ICMPType = 145 // Home Agent Address Discovery Reply Message + ICMPTypeMobilePrefixSolicitation ICMPType = 146 // Mobile Prefix Solicitation + ICMPTypeMobilePrefixAdvertisement ICMPType = 147 // Mobile Prefix Advertisement + ICMPTypeCertificationPathSolicitation ICMPType = 148 // Certification Path Solicitation Message + ICMPTypeCertificationPathAdvertisement ICMPType = 149 // Certification Path Advertisement Message + ICMPTypeMulticastRouterAdvertisement ICMPType = 151 // Multicast Router Advertisement + ICMPTypeMulticastRouterSolicitation ICMPType = 152 // Multicast Router Solicitation + ICMPTypeMulticastRouterTermination ICMPType = 
153 // Multicast Router Termination + ICMPTypeFMIPv6 ICMPType = 154 // FMIPv6 Messages + ICMPTypeRPLControl ICMPType = 155 // RPL Control Message + ICMPTypeILNPv6LocatorUpdate ICMPType = 156 // ILNPv6 Locator Update Message + ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request + ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation + ICMPTypeMPLControl ICMPType = 159 // MPL Control Message +) + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +var icmpTypes = map[ICMPType]string{ + 1: "destination unreachable", + 2: "packet too big", + 3: "time exceeded", + 4: "parameter problem", + 128: "echo request", + 129: "echo reply", + 130: "multicast listener query", + 131: "multicast listener report", + 132: "multicast listener done", + 133: "router solicitation", + 134: "router advertisement", + 135: "neighbor solicitation", + 136: "neighbor advertisement", + 137: "redirect message", + 138: "router renumbering", + 139: "icmp node information query", + 140: "icmp node information response", + 141: "inverse neighbor discovery solicitation message", + 142: "inverse neighbor discovery advertisement message", + 143: "version 2 multicast listener report", + 144: "home agent address discovery request message", + 145: "home agent address discovery reply message", + 146: "mobile prefix solicitation", + 147: "mobile prefix advertisement", + 148: "certification path solicitation message", + 149: "certification path advertisement message", + 151: "multicast router advertisement", + 152: "multicast router solicitation", + 153: "multicast router termination", + 154: "fmipv6 messages", + 155: "rpl control message", + 156: "ilnpv6 locator update message", + 157: "duplicate address request", + 158: "duplicate address confirmation", + 159: "mpl control message", +} diff --git a/vendor/golang.org/x/net/ipv6/icmp.go b/vendor/golang.org/x/net/ipv6/icmp.go new file mode 100644 index 000000000..b7f48e27b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/iana" + +// BUG(mikio): On Windows, methods related to ICMPFilter are not +// implemented. + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv6 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolIPv6ICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model. A node means a +// device that implements IP. A router means a node that forwards IP +// packets not explicitly addressed to itself, and a host means a node +// that is not a router. +type ICMPFilter struct { + icmpv6Filter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. 
+func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go new file mode 100644 index 000000000..e1a791de4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Filt { + if block { + f.Filt[i] = 0 + } else { + f.Filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_linux.go b/vendor/golang.org/x/net/ipv6/icmp_linux.go new file mode 100644 index 000000000..647f6b44f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_linux.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Data[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Data[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Data { + if block { + f.Data[i] = 1<<32 - 1 + } else { + f.Data[i] = 0 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/vendor/golang.org/x/net/ipv6/icmp_solaris.go new file mode 100644 index 000000000..7c23bb1cf --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_solaris.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.X__icmp6_filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.X__icmp6_filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.X__icmp6_filt { + if block { + f.X__icmp6_filt[i] = 0 + } else { + f.X__icmp6_filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.X__icmp6_filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go new file mode 100644 index 000000000..c4b9be6db --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +type icmpv6Filter struct { +} + +func (f *icmpv6Filter) accept(typ ICMPType) { +} + +func (f *icmpv6Filter) block(typ ICMPType) { +} + +func (f *icmpv6Filter) setAll(block bool) { +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_test.go b/vendor/golang.org/x/net/ipv6/icmp_test.go new file mode 100644 index 000000000..d8e9675dc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_test.go @@ -0,0 +1,96 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var icmpStringTests = []struct { + in ipv6.ICMPType + out string +}{ + {ipv6.ICMPTypeDestinationUnreachable, "destination unreachable"}, + + {256, ""}, +} + +func TestICMPString(t *testing.T) { + for _, tt := range icmpStringTests { + s := tt.in.String() + if s != tt.out { + t.Errorf("got %s; want %s", s, tt.out) + } + } +} + +func TestICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + var f ipv6.ICMPFilter + for _, toggle := range []bool{false, true} { + f.SetAll(toggle) + for _, typ := range []ipv6.ICMPType{ + ipv6.ICMPTypeDestinationUnreachable, + ipv6.ICMPTypeEchoReply, + ipv6.ICMPTypeNeighborSolicitation, + ipv6.ICMPTypeDuplicateAddressConfirmation, + } { + f.Accept(typ) + if f.WillBlock(typ) { + t.Errorf("ipv6.ICMPFilter.Set(%v, false) failed", typ) + } + f.Block(typ) + if !f.WillBlock(typ) { + t.Errorf("ipv6.ICMPFilter.Set(%v, true) failed", typ) + } + } + } +} + +func TestSetICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoRequest) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + kf, err := p.ICMPFilter() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(kf, &f) { + t.Fatalf("got %#v; want %#v", kf, f) + } +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_windows.go b/vendor/golang.org/x/net/ipv6/icmp_windows.go new file mode 100644 index 000000000..443cd0736 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_windows.go @@ -0,0 +1,22 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) block(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) setAll(block bool) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + // TODO(mikio): implement this + return false +} diff --git a/vendor/golang.org/x/net/ipv6/mocktransponder_test.go b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go new file mode 100644 index 000000000..6efe56c68 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go @@ -0,0 +1,32 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "testing" +) + +func connector(t *testing.T, network, addr string, done chan<- bool) { + defer func() { done <- true }() + + c, err := net.Dial(network, addr) + if err != nil { + t.Error(err) + return + } + c.Close() +} + +func acceptor(t *testing.T, ln net.Listener, done chan<- bool) { + defer func() { done <- true }() + + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + c.Close() +} diff --git a/vendor/golang.org/x/net/ipv6/multicast_test.go b/vendor/golang.org/x/net/ipv6/multicast_test.go new file mode 100644 index 000000000..69a21cd38 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicast_test.go @@ -0,0 +1,264 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var packetConnReadWriteMulticastUDPTests = []struct { + addr string + grp, src *net.UDPAddr +}{ + {"[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + + {"[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if !nettest.SupportsIPv6MulticastDeliveryOnLoopback() { + t.Skipf("multicast delivery doesn't work correctly on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastUDPTests { + c, err := net.ListenPacket("udp6", tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + grp := *tt.grp + grp.Port = c.LocalAddr().(*net.UDPAddr).Port + p := ipv6.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, &grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, &grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != 
nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + IfIndex: ifi.Index, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + cm.HopLimit = i + 1 + if n, err := p.WriteTo(wb, &cm, &grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatal(err) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } + } +} + +var packetConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if !nettest.SupportsIPv6MulticastDeliveryOnLoopback() { + t.Skipf("multicast delivery doesn't work correctly on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip6:ipv6-icmp", "::") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, tt.grp.IP) + p := ipv6.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, tt.grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + IfIndex: ifi.Index, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + + var psh []byte + for i, toggle := range []bool{true, false, true} { + if toggle { + psh = nil + if err := 
p.SetChecksum(true, 2); err != nil { + // Solaris never allows to + // modify ICMP properties. + if runtime.GOOS != "solaris" { + t.Fatal(err) + } + } + } else { + psh = pshicmp + // Some platforms never allow to + // disable the kernel checksum + // processing. + p.SetChecksum(false, -1) + } + wb, err := (&icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(psh) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + cm.HopLimit = i + 1 + if n, err := p.WriteTo(wb, &cm, tt.grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { + t.Fatal(err) + } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/multicastlistener_test.go b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go new file mode 100644 index 000000000..b27713e2f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go @@ -0,0 +1,261 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var udpMultipleGroupListenerTests = []net.Addr{ + &net.UDPAddr{IP: net.ParseIP("ff02::114")}, // see RFC 4727 + &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}, + &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}, +} + +func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c, err := net.ListenPacket("udp6", "[::]:0") // wildcard address with non-reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } +} + +func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c1, err := net.ListenPacket("udp6", "[ff02::]:0") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c1.Close() + _, port, err := net.SplitHostPort(c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + c2, err := net.ListenPacket("udp6", net.JoinHostPort("ff02::", port)) // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + var ps [2]*ipv6.PacketConn + ps[0] = ipv6.NewPacketConn(c1) + ps[1] = ipv6.NewPacketConn(c2) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + for _, p := range ps { + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + for _, p := range ps { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } + } +} + +func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + type ml struct { + c *ipv6.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + port := "0" + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip6", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("udp6", net.JoinHostPort(ip.String()+"%"+ifi.Name, port)) // unicast address with non-reusable port + if err != nil { + // The listen may fail when the serivce is + // already in use, but it's fine because the + // purpose of this is not to test the + // bookkeeping of IP control block inside the + // kernel. 
+ t.Log(err) + continue + } + defer c.Close() + if port == "0" { + _, port, err = net.SplitHostPort(c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + } + p := ipv6.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::") // wildcard address + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "darwin", "dragonfly", "openbsd": // platforms that return fe80::1%lo0: bind: can't assign requested address + t.Skipf("not supported on %s", runtime.GOOS) + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + type ml struct { + c *ipv6.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip6", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("ip6:ipv6-icmp", ip.String()+"%"+ifi.Name) // unicast address + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go new file mode 100644 index 000000000..9e6b902d7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go @@ -0,0 +1,157 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var packetConnMulticastSocketOptionTests = []struct { + net, proto, addr string + grp, src net.Addr +}{ + {"udp6", "", "[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff02::115")}, nil}, // see RFC 4727 + + {"udp6", "", "[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 + {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff30::8000:2")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnMulticastSocketOptionTests { + if tt.net == "ip6" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, p, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) + } + } +} + +type testIPv6MulticastConn interface { + MulticastHopLimit() (int, error) + SetMulticastHopLimit(ttl int) error + MulticastLoopback() (bool, error) + SetMulticastLoopback(bool) error + JoinGroup(*net.Interface, net.Addr) error + LeaveGroup(*net.Interface, net.Addr) error + JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error +} + +func testMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp net.Addr) { + const hoplim = 255 + if err := c.SetMulticastHopLimit(hoplim); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastHopLimit(); err != nil { + t.Error(err) + return + } else if v != hoplim { + t.Errorf("got %v; want %v", v, hoplim) + return + } + + for _, toggle := range []bool{true, false} { + if err := c.SetMulticastLoopback(toggle); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastLoopback(); err != nil { + t.Error(err) + return + } else if v != toggle { + t.Errorf("got %v; want %v", v, toggle) + return + } + } + + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} + +func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp, src net.Addr) { + // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + return + } + t.Error(err) + return + } + if err := c.IncludeSourceSpecificGroup(ifi, grp, 
src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} diff --git a/vendor/golang.org/x/net/ipv6/payload.go b/vendor/golang.org/x/net/ipv6/payload.go new file mode 100644 index 000000000..a8197f169 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv6 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go new file mode 100644 index 000000000..4ee4b062c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -0,0 +1,35 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.writeTo(b, cm, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go new file mode 100644 index 000000000..fdc6c3994 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + if n, nn, _, src, err = c.ReadMsgIP(b, oob); err != nil { + return 0, nil, nil, err + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP16(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go new file mode 100644 index 000000000..8f6d02e2f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go @@ -0,0 +1,57 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP16(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go new file mode 100644 index 000000000..99a43542b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -0,0 +1,41 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go b/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go new file mode 100644 index 000000000..c11d92ae9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go @@ -0,0 +1,242 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv6_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagHopLimit | ipv6.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr()) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr()) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr) { + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go b/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go new file mode 100644 index 000000000..e2fd73370 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go @@ -0,0 +1,373 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 + +package ipv6_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagHopLimit | ipv6.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv6.Message{ + { + Buffers: [][]byte{payload}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv6.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv6.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv6.Message{ + { + Buffers: [][]byte{datagram}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv6.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv6.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err 
:= p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr, batch bool) { + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + batchReader := func() { + defer wg.Done() + ms := []ipv6.Message{ + { + Buffers: [][]byte{make([]byte, 128)}, + OOB: ipv6.NewControlMessage(cf), + }, + } + n, err := p.ReadBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + var cm ipv6.ControlMessage + if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil { + t.Error(err) + return + } + b := ms[0].Buffers[0][:ms[0].N] + if !bytes.Equal(b, data) { + t.Errorf("got %#v; want %#v", b, data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be 
space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + batchWriter := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + ms := []ipv6.Message{ + { + Buffers: [][]byte{data}, + OOB: cm.Marshal(), + Addr: dst, + }, + } + n, err := p.WriteBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + if ms[0].N != len(data) { + t.Errorf("got %d; want %d", ms[0].N, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + if batch { + go batchWriter(i%2 != 0) + } else { + go writer(i%2 != 0) + } + } + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_test.go b/vendor/golang.org/x/net/ipv6/readwrite_test.go new file mode 100644 index 000000000..206b915ce --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_test.go @@ -0,0 +1,148 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "bytes" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkReadWriteUnicast(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + + dst := c.LocalAddr() + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + + b.Run("NetUDP", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("IPv6UDP", func(b *testing.B) { + p := ipv6.NewPacketConn(c) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) +} + +func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + wb := []byte("HELLO-R-U-THERE") + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + rb := make([]byte, 128) + if n, cm, _, err := p.ReadFrom(rb); err != nil { + t.Error(err) + return + } else if !bytes.Equal(rb[:n], wb) { + t.Errorf("got %v; want %v", rb[:n], wb) + return + } else { + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + } + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Error(err) + return + } else if n != len(wb) { + t.Errorf("got %d; want %d", n, len(wb)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt.go b/vendor/golang.org/x/net/ipv6/sockopt.go new file mode 100644 index 000000000..cc3907df3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/socket" + +// Sticky socket options +const ( + ssoTrafficClass = iota // header field for unicast packet, RFC 3542 + ssoHopLimit // header field for unicast packet, RFC 3493 + ssoMulticastInterface // outbound interface for multicast packet, RFC 3493 + ssoMulticastHopLimit // header field for multicast packet, RFC 3493 + ssoMulticastLoopback // loopback for multicast packet, RFC 3493 + ssoReceiveTrafficClass // header field on received packet, RFC 3542 + ssoReceiveHopLimit // header field on received packet, RFC 2292 or 3542 + ssoReceivePacketInfo // incbound or outbound packet path, RFC 2292 or 3542 + ssoReceivePathMTU // path mtu, RFC 3542 + ssoPathMTU // path mtu, RFC 3542 + ssoChecksum // packet checksum, RFC 2292 or 3542 + ssoICMPFilter // icmp filter, RFC 2292 or 3542 + ssoJoinGroup // any-source multicast, RFC 3493 + ssoLeaveGroup // any-source multicast, RFC 3493 + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoAttachFilter // attach BPF for filtering inbound traffic +) + +// Sticky socket option value types +const ( + ssoTypeIPMreq = iota + 1 + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky socket option. +type sockOpt struct { + socket.Option + typ int // hint for option value type; optional +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go new file mode 100644 index 000000000..0eac86eb8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -0,0 +1,87 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + n, err := so.GetInt(c) + if err != nil { + return nil, err + } + return net.InterfaceByIndex(n) +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + var n int + if ifi != nil { + n = ifi.Index + } + return so.SetInt(c, n) +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPv6Filter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPv6Filter]byte)(unsafe.Pointer(f))[:sizeofICMPv6Filter] + return so.Set(c, b) +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, 0, err + } + if n != sizeofIPv6Mtuinfo { + return nil, 0, errOpNoSupport + } + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if mi.Addr.Scope_id == 0 { + return nil, int(mi.Mtu), nil + } + ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) + if err != nil { + return nil, 0, err + } + return ifi, int(mi.Mtu), nil +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go new file mode 100644 index 000000000..1f4a273e4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -0,0 +1,46 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + return nil, 0, errOpNoSupport +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_test.go b/vendor/golang.org/x/net/ipv6/sockopt_test.go new file mode 100644 index 000000000..774338dbf --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_test.go @@ -0,0 +1,133 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var supportsIPv6 bool = nettest.SupportsIPv6() + +func TestConnInitiatorPathMTU(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go acceptor(t, ln, done) + + c, err := net.Dial("tcp6", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_PATHMTU option + t.Logf("not supported on %s", runtime.GOOS) + default: + t.Fatal(err) + } + } else { + t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) + } + + <-done +} + +func TestConnResponderPathMTU(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go connector(t, "tcp6", ln.Addr().String(), done) + + c, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_PATHMTU option + t.Logf("not supported on %s", runtime.GOOS) + default: + t.Fatal(err) + } + } else { + t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) + } + + <-done +} + +func TestPacketConnChecksum(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := 
nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolOSPFIGP), "::") // OSPF for IPv6 + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + offset := 12 // see RFC 5340 + + for _, toggle := range []bool{false, true} { + if err := p.SetChecksum(toggle, offset); err != nil { + if toggle { + t.Fatalf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err) + } else { + // Some platforms never allow to disable the kernel + // checksum processing. + t.Logf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err) + } + } + if on, offset, err := p.Checksum(); err != nil { + t.Fatal(err) + } else { + t.Logf("kernel checksum processing enabled=%v, offset=%v", on, offset) + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/golang.org/x/net/ipv6/sys_asmreq.go new file mode 100644 index 000000000..b0510c0b5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -0,0 +1,24 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreq ipv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + b := (*[sizeofIPv6Mreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPv6Mreq] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go new file mode 100644 index 000000000..eece96187 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf.go b/vendor/golang.org/x/net/ipv6/sys_bpf.go new file mode 100644 index 000000000..b2dbcb2f2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go new file mode 100644 index 000000000..676bea555 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !linux + +package ipv6 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go new file mode 100644 index 000000000..e416eaa1f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly netbsd openbsd + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_darwin.go b/vendor/golang.org/x/net/ipv6/sys_darwin.go new file mode 100644 index 000000000..e3d044392 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_darwin.go @@ -0,0 +1,106 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "strconv" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlHopLimit: {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_2292PKTINFO, sizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292HOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292PKTINFO, Len: 4}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. + // See http://support.apple.com/kb/HT1633. 
+ if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return + } + ctlOpts[ctlTrafficClass] = ctlOpt{sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass} + ctlOpts[ctlHopLimit] = ctlOpt{sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit} + ctlOpts[ctlPacketInfo] = ctlOpt{sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo} + ctlOpts[ctlNextHop] = ctlOpt{sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop} + ctlOpts[ctlPathMTU] = ctlOpt{sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU} + sockOpts[ssoTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}} + sockOpts[ssoReceiveTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}} + sockOpts[ssoReceiveHopLimit] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}} + sockOpts[ssoReceivePacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}} + sockOpts[ssoReceivePathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}} + sockOpts[ssoPathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} +} + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/vendor/golang.org/x/net/ipv6/sys_freebsd.go new file mode 100644 index 000000000..e9349dc2c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_freebsd.go @@ -0,0 +1,92 @@ +// Copyright 2013 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface 
= uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_linux.go b/vendor/golang.org/x/net/ipv6/sys_linux.go new file mode 100644 index 000000000..bc218103c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_linux.go @@ -0,0 +1,74 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMPV6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + 
ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_solaris.go b/vendor/golang.org/x/net/ipv6/sys_solaris.go new file mode 100644 index 000000000..d348b5f6e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_solaris.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: 
sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go new file mode 100644 index 000000000..add8ccc0b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go new file mode 100644 index 000000000..581ee490f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!freebsd,!linux,!solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go new file mode 100644 index 000000000..b845388ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv6/sys_windows.go b/vendor/golang.org/x/net/ipv6/sys_windows.go new file mode 100644 index 000000000..fc36b018b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_windows.go @@ -0,0 +1,75 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PKTINFO = 0x13 + + sizeofSockaddrInet6 = 0x1c + + sizeofIPv6Mreq = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofICMPv6Filter = 0 +) + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type icmpv6Filter struct { + // TODO(mikio): implement this +} + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/unicast_test.go b/vendor/golang.org/x/net/ipv6/unicast_test.go new file mode 100644 index 000000000..a0b7d9550 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/unicast_test.go @@ -0,0 +1,184 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func TestPacketConnReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + cm.HopLimit = i + 1 + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } +} + +func TestPacketConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst, err := net.ResolveIPAddr("ip6", "::1") + if err != nil { + t.Fatal(err) + } + + pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, dst.IP) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + + var psh []byte + for i, toggle := range []bool{true, false, true} { + if toggle { + psh = nil + if err := p.SetChecksum(true, 2); err != nil { + // Solaris never allows to modify + // ICMP properties. + if runtime.GOOS != "solaris" { + t.Fatal(err) + } + } + } else { + psh = pshicmp + // Some platforms never allow to disable the + // kernel checksum processing. 
+ p.SetChecksum(false, -1) + } + wb, err := (&icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(psh) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + cm.HopLimit = i + 1 + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { + t.Fatal(err) + } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go new file mode 100644 index 000000000..e175dccf5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func TestConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + errc := make(chan error, 1) + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + errc <- c.Close() + }() + + c, err := net.Dial("tcp6", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv6.NewConn(c)) + + if err := <-errc; err != nil { + t.Errorf("server: %v", err) + } +} + +var packetConnUnicastSocketOptionTests = []struct { + net, proto, addr string +}{ + {"udp6", "", "[::1]:0"}, + {"ip6", ":ipv6-icmp", "::1"}, +} + +func TestPacketConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnUnicastSocketOptionTests { + if tt.net == "ip6" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv6.NewPacketConn(c)) + } +} + +type testIPv6UnicastConn interface { + TrafficClass() (int, error) + SetTrafficClass(int) error + HopLimit() (int, error) + SetHopLimit(int) error +} + +func testUnicastSocketOptions(t *testing.T, c testIPv6UnicastConn) { + tclass := iana.DiffServCS0 | iana.NotECNTransport + if err := c.SetTrafficClass(tclass); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_TCLASS option + t.Logf("not supported on %s", runtime.GOOS) + goto next + } + t.Fatal(err) + } + if v, err := c.TrafficClass(); err != nil { + t.Fatal(err) + } else if v != tclass { + t.Fatalf("got %v; want %v", v, tclass) + } + +next: + hoplim := 255 + if err := c.SetHopLimit(hoplim); err != nil { + t.Fatal(err) + } + if v, err := c.HopLimit(); err != nil { + t.Fatal(err) + } else if v != hoplim { + t.Fatalf("got %v; want %v", v, hoplim) + } +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/golang.org/x/net/ipv6/zsys_darwin.go new file mode 100644 index 000000000..6aab1dfab --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -0,0 +1,131 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + sysIPV6_2292PKTINFO = 0x13 + sysIPV6_2292HOPLIMIT = 0x14 + sysIPV6_2292NEXTHOP = 0x15 + sysIPV6_2292HOPOPTS = 0x16 + sysIPV6_2292DSTOPTS = 0x17 + sysIPV6_2292RTHDR = 0x18 + + sysIPV6_2292PKTOPTIONS = 0x19 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RECVTCLASS = 0x23 + sysIPV6_TCLASS = 0x24 + + sysIPV6_RTHDRDSTOPTS = 0x39 + + sysIPV6_RECVPKTINFO = 0x3d + + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + 
sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_MSFILTER = 0x4a + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_BOUND_IF = 0x7d + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go new file mode 100644 index 000000000..d2de804d8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -0,0 +1,88 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go 
b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go new file mode 100644 index 000000000..919e572d4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -0,0 +1,122 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go new file mode 100644 index 000000000..cb8141f9c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + 
sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go new file mode 100644 index 000000000..cb8141f9c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + 
Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go new file mode 100644 index 000000000..73aa8c6df --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr 
sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go new file mode 100644 index 000000000..b64f0157d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 
+ Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go new file mode 100644 index 000000000..73aa8c6df --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + 
sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go new file mode 100644 index 000000000..b64f0157d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + 
sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go new file mode 100644 index 000000000..73aa8c6df --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 
0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go new file mode 100644 index 000000000..b64f0157d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 
0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go new file mode 100644 index 000000000..b64f0157d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + 
sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go new file mode 100644 index 000000000..73aa8c6df --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + 
sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go new file mode 100644 index 000000000..c9bf6a87e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + 
sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go new file mode 100644 index 000000000..b64f0157d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + 
sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go new file mode 100644 index 000000000..b64f0157d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + 
sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go new file mode 100644 index 000000000..b64f0157d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 
+ sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go new file mode 100644 index 000000000..bcada13b7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -0,0 +1,84 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb 
+ sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go new file mode 100644 index 000000000..86cf3c637 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTH_LEVEL = 0x35 + sysIPV6_ESP_TRANS_LEVEL = 0x36 + sysIPV6_ESP_NETWORK_LEVEL = 0x37 + sysIPSEC6_OUTSA = 0x38 + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + sysIPV6_IPCOMP_LEVEL = 0x3c + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + sysIPV6_PIPEX = 0x3f + + sysIPV6_RTABLE = 0x1021 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/golang.org/x/net/ipv6/zsys_solaris.go new file mode 100644 index 000000000..cf1837dd2 --- /dev/null +++ 
b/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -0,0 +1,131 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x5 + sysIPV6_MULTICAST_IF = 0x6 + sysIPV6_MULTICAST_HOPS = 0x7 + sysIPV6_MULTICAST_LOOP = 0x8 + sysIPV6_JOIN_GROUP = 0x9 + sysIPV6_LEAVE_GROUP = 0xa + + sysIPV6_PKTINFO = 0xb + + sysIPV6_HOPLIMIT = 0xc + sysIPV6_NEXTHOP = 0xd + sysIPV6_HOPOPTS = 0xe + sysIPV6_DSTOPTS = 0xf + + sysIPV6_RTHDR = 0x10 + sysIPV6_RTHDRDSTOPTS = 0x11 + + sysIPV6_RECVPKTINFO = 0x12 + sysIPV6_RECVHOPLIMIT = 0x13 + sysIPV6_RECVHOPOPTS = 0x14 + + sysIPV6_RECVRTHDR = 0x16 + + sysIPV6_RECVRTHDRDSTOPTS = 0x17 + + sysIPV6_CHECKSUM = 0x18 + sysIPV6_RECVTCLASS = 0x19 + sysIPV6_USE_MIN_MTU = 0x20 + sysIPV6_DONTFRAG = 0x21 + sysIPV6_SEC_OPT = 0x22 + sysIPV6_SRC_PREFERENCES = 0x23 + sysIPV6_RECVPATHMTU = 0x24 + sysIPV6_PATHMTU = 0x25 + sysIPV6_TCLASS = 0x26 + sysIPV6_V6ONLY = 0x27 + + sysIPV6_RECVDSTOPTS = 0x28 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sysIPV6_PREFER_SRC_HOME = 0x1 + sysIPV6_PREFER_SRC_COA = 0x2 + sysIPV6_PREFER_SRC_PUBLIC = 0x4 + sysIPV6_PREFER_SRC_TMP = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x10 + sysIPV6_PREFER_SRC_CGA = 0x20 + + sysIPV6_PREFER_SRC_MIPMASK = 0x3 + sysIPV6_PREFER_SRC_MIPDEFAULT = 0x1 + sysIPV6_PREFER_SRC_TMPMASK = 0xc + sysIPV6_PREFER_SRC_TMPDEFAULT = 0x4 + sysIPV6_PREFER_SRC_CGAMASK = 0x30 + sysIPV6_PREFER_SRC_CGADEFAULT = 0x10 + + sysIPV6_PREFER_SRC_MASK = 0x3f + + sysIPV6_PREFER_SRC_DEFAULT = 0x15 + + sysIPV6_BOUND_IF = 0x41 + sysIPV6_UNSPEC_SRC = 0x42 + + sysICMP6_FILTER = 0x1 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet6 = 0x20 + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x24 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} + +type icmpv6Filter struct { + X__icmp6_filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go new file mode 100644 index 000000000..20f2b8940 --- /dev/null +++ b/vendor/golang.org/x/net/lex/httplex/httplex.go @@ -0,0 +1,351 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httplex contains rules around lexical matters of various +// HTTP-related specifications. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. 
+package httplex + +import ( + "net" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func IsTokenRune(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !IsTokenRune(r) +} + +// HeaderValuesContainsToken reports whether any string in values +// contains the provided token, ASCII case-insensitively. +func HeaderValuesContainsToken(values []string, token string) bool { + for _, v := range values { + if headerValueContainsToken(v, token) { + return true + } + } + return false +} + +// isOWS reports whether b is an optional whitespace byte, as defined +// by RFC 7230 section 3.2.3. +func isOWS(b byte) bool { return b == ' ' || b == '\t' } + +// trimOWS returns x with all optional whitespace removes from the +// beginning and end. +func trimOWS(x string) string { + // TODO: consider using strings.Trim(x, " \t") instead, + // if and when it's fast enough. See issue 10292. + // But this ASCII-only code will probably always beat UTF-8 + // aware code. + for len(x) > 0 && isOWS(x[0]) { + x = x[1:] + } + for len(x) > 0 && isOWS(x[len(x)-1]) { + x = x[:len(x)-1] + } + return x +} + +// headerValueContainsToken reports whether v (assumed to be a +// 0#element, in the ABNF extension described in RFC 7230 section 7) +// contains token amongst its comma-separated tokens, ASCII +// case-insensitively. +func headerValueContainsToken(v string, token string) bool { + v = trimOWS(v) + if comma := strings.IndexByte(v, ','); comma != -1 { + return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) + } + return tokenEqual(v, token) +} + +// lowerASCII returns the ASCII lowercase version of b. +func lowerASCII(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. +func tokenEqual(t1, t2 string) bool { + if len(t1) != len(t2) { + return false + } + for i, b := range t1 { + if b >= utf8.RuneSelf { + // No UTF-8 or non-ASCII allowed in tokens. 
+ return false + } + if lowerASCII(byte(b)) != lowerASCII(t2[i]) { + return false + } + } + return true +} + +// isLWS reports whether b is linear white space, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// LWS = [CRLF] 1*( SP | HT ) +func isLWS(b byte) bool { return b == ' ' || b == '\t' } + +// isCTL reports whether b is a control byte, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// CTL = +func isCTL(b byte) bool { + const del = 0x7f // a CTL + return b < ' ' || b == del +} + +// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. +// HTTP/2 imposes the additional restriction that uppercase ASCII +// letters are not allowed. +// +// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +func ValidHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !IsTokenRune(r) { + return false + } + } + return true +} + +// ValidHostHeader reports whether h is a valid host header. +func ValidHostHeader(h string) bool { + // The latest spec is actually this: + // + // http://tools.ietf.org/html/rfc7230#section-5.4 + // Host = uri-host [ ":" port ] + // + // Where uri-host is: + // http://tools.ietf.org/html/rfc3986#section-3.2.2 + // + // But we're going to be much more lenient for now and just + // search for any byte that's not a valid byte in any of those + // expressions. + for i := 0; i < len(h); i++ { + if !validHostByte[h[i]] { + return false + } + } + return true +} + +// See the validHostHeader comment. +var validHostByte = [256]bool{ + '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, + '8': true, '9': true, + + 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, + 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, + 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, + 'y': true, 'z': true, + + 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, + 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, + 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, + 'Y': true, 'Z': true, + + '!': true, // sub-delims + '$': true, // sub-delims + '%': true, // pct-encoded (and used in IPv6 zones) + '&': true, // sub-delims + '(': true, // sub-delims + ')': true, // sub-delims + '*': true, // sub-delims + '+': true, // sub-delims + ',': true, // sub-delims + '-': true, // unreserved + '.': true, // unreserved + ':': true, // IPv6address + Host expression's optional port + ';': true, // sub-delims + '=': true, // sub-delims + '[': true, + '\'': true, // sub-delims + ']': true, + '_': true, // unreserved + '~': true, // unreserved +} + +// ValidHeaderFieldValue reports whether v is a valid "field-value" according to +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : +// +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = +// +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : +// +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// 
field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func ValidHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + b := v[i] + if isCTL(b) && !isLWS(b) { + return false + } + } + return true +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// PunycodeHostPort returns the IDNA Punycode version +// of the provided "host" or "host:port" string. +func PunycodeHostPort(v string) (string, error) { + if isASCII(v) { + return v, nil + } + + host, port, err := net.SplitHostPort(v) + if err != nil { + // The input 'v' argument was just a "host" argument, + // without a port. This error should not be returned + // to the caller. + host = v + port = "" + } + host, err = idna.ToASCII(host) + if err != nil { + // Non-UTF-8? Not representable in Punycode, in any + // case. + return "", err + } + if port == "" { + return host, nil + } + return net.JoinHostPort(host, port), nil +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex_test.go b/vendor/golang.org/x/net/lex/httplex/httplex_test.go new file mode 100644 index 000000000..f47adc939 --- /dev/null +++ b/vendor/golang.org/x/net/lex/httplex/httplex_test.go @@ -0,0 +1,119 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package httplex + +import ( + "testing" +) + +func isChar(c rune) bool { return c <= 127 } + +func isCtl(c rune) bool { return c <= 31 || c == 127 } + +func isSeparator(c rune) bool { + switch c { + case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t': + return true + } + return false +} + +func TestIsToken(t *testing.T) { + for i := 0; i <= 130; i++ { + r := rune(i) + expected := isChar(r) && !isCtl(r) && !isSeparator(r) + if IsTokenRune(r) != expected { + t.Errorf("isToken(0x%x) = %v", r, !expected) + } + } +} + +func TestHeaderValuesContainsToken(t *testing.T) { + tests := []struct { + vals []string + token string + want bool + }{ + { + vals: []string{"foo"}, + token: "foo", + want: true, + }, + { + vals: []string{"bar", "foo"}, + token: "foo", + want: true, + }, + { + vals: []string{"foo"}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo"}, + token: "bar", + want: false, + }, + { + vals: []string{" foo "}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar,foo,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar , foo"}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo ,bar "}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar, foo ,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar , foo"}, + token: "FOO", + want: true, + }, + } + for _, tt := range tests { + got := HeaderValuesContainsToken(tt.vals, tt.token) + if got != tt.want { + t.Errorf("headerValuesContainsToken(%q, %q) = %v; want %v", tt.vals, tt.token, got, tt.want) + } + } +} + +func TestPunycodeHostPort(t *testing.T) { + tests := []struct { + in, want string + }{ + {"www.google.com", "www.google.com"}, + {"гофер.рф", "xn--c1ae0ajs.xn--p1ai"}, + {"bücher.de", "xn--bcher-kva.de"}, + {"bücher.de:8080", "xn--bcher-kva.de:8080"}, + {"[1::6]:8080", "[1::6]:8080"}, + } + for _, tt := range tests { + got, err := PunycodeHostPort(tt.in) + if tt.want != got || err != nil { + t.Errorf("PunycodeHostPort(%q) = %q, %v, want %q, nil", tt.in, got, err, tt.want) + } + } +} diff --git a/vendor/golang.org/x/net/lif/address.go b/vendor/golang.org/x/net/lif/address.go new file mode 100644 index 000000000..afb957fd8 --- /dev/null +++ b/vendor/golang.org/x/net/lif/address.go @@ -0,0 +1,105 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "errors" + "unsafe" +) + +// An Addr represents an address associated with packet routing. +type Addr interface { + // Family returns an address family. + Family() int +} + +// An Inet4Addr represents an internet address for IPv4. +type Inet4Addr struct { + IP [4]byte // IP address + PrefixLen int // address prefix length +} + +// Family implements the Family method of Addr interface. +func (a *Inet4Addr) Family() int { return sysAF_INET } + +// An Inet6Addr represents an internet address for IPv6. +type Inet6Addr struct { + IP [16]byte // IP address + PrefixLen int // address prefix length + ZoneID int // zone identifier +} + +// Family implements the Family method of Addr interface. +func (a *Inet6Addr) Family() int { return sysAF_INET6 } + +// Addrs returns a list of interface addresses. +// +// The provided af must be an address family and name must be a data +// link name. The zero value of af or name means a wildcard. 
+func Addrs(af int, name string) ([]Addr, error) { + eps, err := newEndpoints(af) + if len(eps) == 0 { + return nil, err + } + defer func() { + for _, ep := range eps { + ep.close() + } + }() + lls, err := links(eps, name) + if len(lls) == 0 { + return nil, err + } + var as []Addr + for _, ll := range lls { + var lifr lifreq + for i := 0; i < len(ll.Name); i++ { + lifr.Name[i] = int8(ll.Name[i]) + } + for _, ep := range eps { + ioc := int64(sysSIOCGLIFADDR) + err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifr)) + if err != nil { + continue + } + sa := (*sockaddrStorage)(unsafe.Pointer(&lifr.Lifru[0])) + l := int(nativeEndian.Uint32(lifr.Lifru1[:4])) + if l == 0 { + continue + } + switch sa.Family { + case sysAF_INET: + a := &Inet4Addr{PrefixLen: l} + copy(a.IP[:], lifr.Lifru[4:8]) + as = append(as, a) + case sysAF_INET6: + a := &Inet6Addr{PrefixLen: l, ZoneID: int(nativeEndian.Uint32(lifr.Lifru[24:28]))} + copy(a.IP[:], lifr.Lifru[8:24]) + as = append(as, a) + } + } + } + return as, nil +} + +func parseLinkAddr(b []byte) ([]byte, error) { + nlen, alen, slen := int(b[1]), int(b[2]), int(b[3]) + l := 4 + nlen + alen + slen + if len(b) < l { + return nil, errors.New("invalid address") + } + b = b[4:] + var addr []byte + if nlen > 0 { + b = b[nlen:] + } + if alen > 0 { + addr = make([]byte, alen) + copy(addr, b[:alen]) + } + return addr, nil +} diff --git a/vendor/golang.org/x/net/lif/address_test.go b/vendor/golang.org/x/net/lif/address_test.go new file mode 100644 index 000000000..a25f10b67 --- /dev/null +++ b/vendor/golang.org/x/net/lif/address_test.go @@ -0,0 +1,123 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "fmt" + "testing" +) + +type addrFamily int + +func (af addrFamily) String() string { + switch af { + case sysAF_UNSPEC: + return "unspec" + case sysAF_INET: + return "inet4" + case sysAF_INET6: + return "inet6" + default: + return fmt.Sprintf("%d", af) + } +} + +const hexDigit = "0123456789abcdef" + +type llAddr []byte + +func (a llAddr) String() string { + if len(a) == 0 { + return "" + } + buf := make([]byte, 0, len(a)*3-1) + for i, b := range a { + if i > 0 { + buf = append(buf, ':') + } + buf = append(buf, hexDigit[b>>4]) + buf = append(buf, hexDigit[b&0xF]) + } + return string(buf) +} + +type ipAddr []byte + +func (a ipAddr) String() string { + if len(a) == 0 { + return "" + } + if len(a) == 4 { + return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3]) + } + if len(a) == 16 { + return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]) + } + s := make([]byte, len(a)*2) + for i, tn := range a { + s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf] + } + return string(s) +} + +func (a *Inet4Addr) String() string { + return fmt.Sprintf("(%s %s %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen) +} + +func (a *Inet6Addr) String() string { + return fmt.Sprintf("(%s %s %d %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen, a.ZoneID) +} + +type addrPack struct { + af int + as []Addr +} + +func addrPacks() ([]addrPack, error) { + var lastErr error + var aps []addrPack + for _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + as, err := Addrs(af, "") + if err != nil { + lastErr = err + continue + } + aps = append(aps, addrPack{af: af, as: as}) + } + 
return aps, lastErr +} + +func TestAddrs(t *testing.T) { + aps, err := addrPacks() + if len(aps) == 0 && err != nil { + t.Fatal(err) + } + lps, err := linkPacks() + if len(lps) == 0 && err != nil { + t.Fatal(err) + } + for _, lp := range lps { + n := 0 + for _, ll := range lp.lls { + as, err := Addrs(lp.af, ll.Name) + if err != nil { + t.Fatal(lp.af, ll.Name, err) + } + t.Logf("af=%s name=%s %v", addrFamily(lp.af), ll.Name, as) + n += len(as) + } + for _, ap := range aps { + if ap.af != lp.af { + continue + } + if n != len(ap.as) { + t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(ap.as)) + continue + } + } + } +} diff --git a/vendor/golang.org/x/net/lif/binary.go b/vendor/golang.org/x/net/lif/binary.go new file mode 100644 index 000000000..738a94f42 --- /dev/null +++ b/vendor/golang.org/x/net/lif/binary.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +// This file contains duplicates of encoding/binary package. +// +// This package is supposed to be used by the net package of standard +// library. Therefore the package set used in the package must be the +// same as net package. + +var ( + littleEndian binaryLittleEndian + bigEndian binaryBigEndian +) + +type binaryByteOrder interface { + Uint16([]byte) uint16 + Uint32([]byte) uint32 + Uint64([]byte) uint64 + PutUint16([]byte, uint16) + PutUint32([]byte, uint32) + PutUint64([]byte, uint64) +} + +type binaryLittleEndian struct{} + +func (binaryLittleEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[0]) | uint16(b[1])<<8 +} + +func (binaryLittleEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) +} + +func (binaryLittleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (binaryLittleEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) +} + +func (binaryLittleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (binaryLittleEndian) PutUint64(b []byte, v uint64) { + _ = b[7] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) + b[4] = byte(v >> 32) + b[5] = byte(v >> 40) + b[6] = byte(v >> 48) + b[7] = byte(v >> 56) +} + +type binaryBigEndian struct{} + +func (binaryBigEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[1]) | uint16(b[0])<<8 +} + +func (binaryBigEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 8) + b[1] = byte(v) +} + +func (binaryBigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (binaryBigEndian) 
PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) +} + +func (binaryBigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +func (binaryBigEndian) PutUint64(b []byte, v uint64) { + _ = b[7] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 56) + b[1] = byte(v >> 48) + b[2] = byte(v >> 40) + b[3] = byte(v >> 32) + b[4] = byte(v >> 24) + b[5] = byte(v >> 16) + b[6] = byte(v >> 8) + b[7] = byte(v) +} diff --git a/vendor/golang.org/x/net/lif/defs_solaris.go b/vendor/golang.org/x/net/lif/defs_solaris.go new file mode 100644 index 000000000..02c19981d --- /dev/null +++ b/vendor/golang.org/x/net/lif/defs_solaris.go @@ -0,0 +1,90 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package lif + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_DGRAM = C.SOCK_DGRAM +) + +type sockaddrStorage C.struct_sockaddr_storage + +const ( + sysLIFC_NOXMIT = C.LIFC_NOXMIT + sysLIFC_EXTERNAL_SOURCE = C.LIFC_EXTERNAL_SOURCE + sysLIFC_TEMPORARY = C.LIFC_TEMPORARY + sysLIFC_ALLZONES = C.LIFC_ALLZONES + sysLIFC_UNDER_IPMP = C.LIFC_UNDER_IPMP + sysLIFC_ENABLED = C.LIFC_ENABLED + + sysSIOCGLIFADDR = C.SIOCGLIFADDR + sysSIOCGLIFDSTADDR = C.SIOCGLIFDSTADDR + sysSIOCGLIFFLAGS = C.SIOCGLIFFLAGS + sysSIOCGLIFMTU = C.SIOCGLIFMTU + sysSIOCGLIFNETMASK = C.SIOCGLIFNETMASK + sysSIOCGLIFMETRIC = C.SIOCGLIFMETRIC + sysSIOCGLIFNUM = C.SIOCGLIFNUM + sysSIOCGLIFINDEX = C.SIOCGLIFINDEX + sysSIOCGLIFSUBNET = C.SIOCGLIFSUBNET + sysSIOCGLIFLNKINFO = C.SIOCGLIFLNKINFO + sysSIOCGLIFCONF = C.SIOCGLIFCONF + sysSIOCGLIFHWADDR = C.SIOCGLIFHWADDR +) + +const ( + sysIFF_UP = C.IFF_UP + sysIFF_BROADCAST = C.IFF_BROADCAST + sysIFF_DEBUG = C.IFF_DEBUG + sysIFF_LOOPBACK = C.IFF_LOOPBACK + sysIFF_POINTOPOINT = C.IFF_POINTOPOINT + sysIFF_NOTRAILERS = C.IFF_NOTRAILERS + sysIFF_RUNNING = C.IFF_RUNNING + sysIFF_NOARP = C.IFF_NOARP + sysIFF_PROMISC = C.IFF_PROMISC + sysIFF_ALLMULTI = C.IFF_ALLMULTI + sysIFF_INTELLIGENT = C.IFF_INTELLIGENT + sysIFF_MULTICAST = C.IFF_MULTICAST + sysIFF_MULTI_BCAST = C.IFF_MULTI_BCAST + sysIFF_UNNUMBERED = C.IFF_UNNUMBERED + sysIFF_PRIVATE = C.IFF_PRIVATE +) + +const ( + sizeofLifnum = C.sizeof_struct_lifnum + sizeofLifreq = C.sizeof_struct_lifreq + sizeofLifconf = C.sizeof_struct_lifconf + sizeofLifIfinfoReq = C.sizeof_struct_lif_ifinfo_req +) + +type lifnum C.struct_lifnum + +type lifreq C.struct_lifreq + +type lifconf C.struct_lifconf + +type lifIfinfoReq C.struct_lif_ifinfo_req + +const ( + sysIFT_IPV4 = C.IFT_IPV4 + sysIFT_IPV6 = C.IFT_IPV6 + sysIFT_6TO4 = C.IFT_6TO4 +) diff --git a/vendor/golang.org/x/net/lif/lif.go b/vendor/golang.org/x/net/lif/lif.go new file mode 100644 index 000000000..6e81f81f1 --- /dev/null +++ b/vendor/golang.org/x/net/lif/lif.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +// Package lif provides basic functions for the manipulation of +// logical network interfaces and interface addresses on Solaris. +// +// The package supports Solaris 11 or above. +package lif + +import "syscall" + +type endpoint struct { + af int + s uintptr +} + +func (ep *endpoint) close() error { + return syscall.Close(int(ep.s)) +} + +func newEndpoints(af int) ([]endpoint, error) { + var lastErr error + var eps []endpoint + afs := []int{sysAF_INET, sysAF_INET6} + if af != sysAF_UNSPEC { + afs = []int{af} + } + for _, af := range afs { + s, err := syscall.Socket(af, sysSOCK_DGRAM, 0) + if err != nil { + lastErr = err + continue + } + eps = append(eps, endpoint{af: af, s: uintptr(s)}) + } + if len(eps) == 0 { + return nil, lastErr + } + return eps, nil +} diff --git a/vendor/golang.org/x/net/lif/link.go b/vendor/golang.org/x/net/lif/link.go new file mode 100644 index 000000000..913a53e11 --- /dev/null +++ b/vendor/golang.org/x/net/lif/link.go @@ -0,0 +1,126 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import "unsafe" + +// A Link represents logical data link information. +// +// It also represents base information for logical network interface. +// On Solaris, each logical network interface represents network layer +// adjacency information and the interface has a only single network +// address or address pair for tunneling. It's usual that multiple +// logical network interfaces share the same logical data link. +type Link struct { + Name string // name, equivalent to IP interface name + Index int // index, equivalent to IP interface index + Type int // type + Flags int // flags + MTU int // maximum transmission unit, basically link MTU but may differ between IP address families + Addr []byte // address +} + +func (ll *Link) fetch(s uintptr) { + var lifr lifreq + for i := 0; i < len(ll.Name); i++ { + lifr.Name[i] = int8(ll.Name[i]) + } + ioc := int64(sysSIOCGLIFINDEX) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.Index = int(nativeEndian.Uint32(lifr.Lifru[:4])) + } + ioc = int64(sysSIOCGLIFFLAGS) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.Flags = int(nativeEndian.Uint64(lifr.Lifru[:8])) + } + ioc = int64(sysSIOCGLIFMTU) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.MTU = int(nativeEndian.Uint32(lifr.Lifru[:4])) + } + switch ll.Type { + case sysIFT_IPV4, sysIFT_IPV6, sysIFT_6TO4: + default: + ioc = int64(sysSIOCGLIFHWADDR) + if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil { + ll.Addr, _ = parseLinkAddr(lifr.Lifru[4:]) + } + } +} + +// Links returns a list of logical data links. +// +// The provided af must be an address family and name must be a data +// link name. The zero value of af or name means a wildcard. 
+func Links(af int, name string) ([]Link, error) { + eps, err := newEndpoints(af) + if len(eps) == 0 { + return nil, err + } + defer func() { + for _, ep := range eps { + ep.close() + } + }() + return links(eps, name) +} + +func links(eps []endpoint, name string) ([]Link, error) { + var lls []Link + lifn := lifnum{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP} + lifc := lifconf{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP} + for _, ep := range eps { + lifn.Family = uint16(ep.af) + ioc := int64(sysSIOCGLIFNUM) + if err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifn)); err != nil { + continue + } + if lifn.Count == 0 { + continue + } + b := make([]byte, lifn.Count*sizeofLifreq) + lifc.Family = uint16(ep.af) + lifc.Len = lifn.Count * sizeofLifreq + if len(lifc.Lifcu) == 8 { + nativeEndian.PutUint64(lifc.Lifcu[:], uint64(uintptr(unsafe.Pointer(&b[0])))) + } else { + nativeEndian.PutUint32(lifc.Lifcu[:], uint32(uintptr(unsafe.Pointer(&b[0])))) + } + ioc = int64(sysSIOCGLIFCONF) + if err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifc)); err != nil { + continue + } + nb := make([]byte, 32) // see LIFNAMSIZ in net/if.h + for i := 0; i < int(lifn.Count); i++ { + lifr := (*lifreq)(unsafe.Pointer(&b[i*sizeofLifreq])) + for i := 0; i < 32; i++ { + if lifr.Name[i] == 0 { + nb = nb[:i] + break + } + nb[i] = byte(lifr.Name[i]) + } + llname := string(nb) + nb = nb[:32] + if isDupLink(lls, llname) || name != "" && name != llname { + continue + } + ll := Link{Name: llname, Type: int(lifr.Type)} + ll.fetch(ep.s) + lls = append(lls, ll) + } + } + return lls, nil +} + +func isDupLink(lls []Link, name string) bool { + for _, ll := range lls { + if ll.Name == name { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/net/lif/link_test.go b/vendor/golang.org/x/net/lif/link_test.go new file mode 100644 index 000000000..0cb9b95c6 --- /dev/null +++ b/vendor/golang.org/x/net/lif/link_test.go @@ -0,0 +1,63 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package lif + +import ( + "fmt" + "testing" +) + +func (ll *Link) String() string { + return fmt.Sprintf("name=%s index=%d type=%d flags=%#x mtu=%d addr=%v", ll.Name, ll.Index, ll.Type, ll.Flags, ll.MTU, llAddr(ll.Addr)) +} + +type linkPack struct { + af int + lls []Link +} + +func linkPacks() ([]linkPack, error) { + var lastErr error + var lps []linkPack + for _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + lls, err := Links(af, "") + if err != nil { + lastErr = err + continue + } + lps = append(lps, linkPack{af: af, lls: lls}) + } + return lps, lastErr +} + +func TestLinks(t *testing.T) { + lps, err := linkPacks() + if len(lps) == 0 && err != nil { + t.Fatal(err) + } + for _, lp := range lps { + n := 0 + for _, sll := range lp.lls { + lls, err := Links(lp.af, sll.Name) + if err != nil { + t.Fatal(lp.af, sll.Name, err) + } + for _, ll := range lls { + if ll.Name != sll.Name || ll.Index != sll.Index { + t.Errorf("af=%s got %v; want %v", addrFamily(lp.af), &ll, &sll) + continue + } + t.Logf("af=%s name=%s %v", addrFamily(lp.af), sll.Name, &ll) + n++ + } + } + if n != len(lp.lls) { + t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(lp.lls)) + continue + } + } +} diff --git a/vendor/golang.org/x/net/lif/sys.go b/vendor/golang.org/x/net/lif/sys.go new file mode 100644 index 000000000..c896041b7 --- /dev/null +++ b/vendor/golang.org/x/net/lif/sys.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import "unsafe" + +var nativeEndian binaryByteOrder + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = littleEndian + } else { + nativeEndian = bigEndian + } +} diff --git a/vendor/golang.org/x/net/lif/sys_solaris_amd64.s b/vendor/golang.org/x/net/lif/sys_solaris_amd64.s new file mode 100644 index 000000000..39d76af79 --- /dev/null +++ b/vendor/golang.org/x/net/lif/sys_solaris_amd64.s @@ -0,0 +1,8 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) diff --git a/vendor/golang.org/x/net/lif/syscall.go b/vendor/golang.org/x/net/lif/syscall.go new file mode 100644 index 000000000..aadab2e14 --- /dev/null +++ b/vendor/golang.org/x/net/lif/syscall.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package lif + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +//go:linkname procIoctl libc_ioctl + +var procIoctl uintptr + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func ioctl(s, ioc uintptr, arg unsafe.Pointer) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procIoctl)), 3, s, ioc, uintptr(arg), 0, 0, 0) + if errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go b/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go new file mode 100644 index 000000000..b5e999bec --- /dev/null +++ b/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go @@ -0,0 +1,103 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package lif + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1a + + sysSOCK_DGRAM = 0x1 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +const ( + sysLIFC_NOXMIT = 0x1 + sysLIFC_EXTERNAL_SOURCE = 0x2 + sysLIFC_TEMPORARY = 0x4 + sysLIFC_ALLZONES = 0x8 + sysLIFC_UNDER_IPMP = 0x10 + sysLIFC_ENABLED = 0x20 + + sysSIOCGLIFADDR = -0x3f87968f + sysSIOCGLIFDSTADDR = -0x3f87968d + sysSIOCGLIFFLAGS = -0x3f87968b + sysSIOCGLIFMTU = -0x3f879686 + sysSIOCGLIFNETMASK = -0x3f879683 + sysSIOCGLIFMETRIC = -0x3f879681 + sysSIOCGLIFNUM = -0x3ff3967e + sysSIOCGLIFINDEX = -0x3f87967b + sysSIOCGLIFSUBNET = -0x3f879676 + sysSIOCGLIFLNKINFO = -0x3f879674 + sysSIOCGLIFCONF = -0x3fef965b + sysSIOCGLIFHWADDR = -0x3f879640 +) + +const ( + sysIFF_UP = 0x1 + sysIFF_BROADCAST = 0x2 + sysIFF_DEBUG = 0x4 + sysIFF_LOOPBACK = 0x8 + sysIFF_POINTOPOINT = 0x10 + sysIFF_NOTRAILERS = 0x20 + sysIFF_RUNNING = 0x40 + sysIFF_NOARP = 0x80 + sysIFF_PROMISC = 0x100 + sysIFF_ALLMULTI = 0x200 + sysIFF_INTELLIGENT = 0x400 + sysIFF_MULTICAST = 0x800 + sysIFF_MULTI_BCAST = 0x1000 + sysIFF_UNNUMBERED = 0x2000 + sysIFF_PRIVATE = 0x8000 +) + +const ( + sizeofLifnum = 0xc + sizeofLifreq = 0x178 + sizeofLifconf = 0x18 + sizeofLifIfinfoReq = 0x10 +) + +type lifnum struct { + Family uint16 + Pad_cgo_0 [2]byte + Flags int32 + Count int32 +} + +type lifreq struct { + Name [32]int8 + Lifru1 [4]byte + Type uint32 + Lifru [336]byte +} + +type lifconf struct { + Family uint16 + Pad_cgo_0 [2]byte + Flags int32 + Len int32 + Pad_cgo_1 [4]byte + Lifcu [8]byte +} + +type lifIfinfoReq struct { + Maxhops uint8 + Pad_cgo_0 [3]byte + Reachtime uint32 + Reachretrans uint32 + Maxmtu uint32 +} + +const ( + sysIFT_IPV4 = 0xc8 + sysIFT_IPV6 = 0xc9 + sysIFT_6TO4 = 0xca +) diff --git a/vendor/golang.org/x/net/nettest/conntest.go b/vendor/golang.org/x/net/nettest/conntest.go new file mode 100644 index 000000000..5bd3a8c68 --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest.go @@ -0,0 +1,456 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nettest provides utilities for network testing. +package nettest + +import ( + "bytes" + "encoding/binary" + "io" + "io/ioutil" + "math/rand" + "net" + "runtime" + "sync" + "testing" + "time" +) + +var ( + aLongTimeAgo = time.Unix(233431200, 0) + neverTimeout = time.Time{} +) + +// MakePipe creates a connection between two endpoints and returns the pair +// as c1 and c2, such that anything written to c1 is read by c2 and vice-versa. 
+// The stop function closes all resources, including c1, c2, and the underlying +// net.Listener (if there is one), and should not be nil. +type MakePipe func() (c1, c2 net.Conn, stop func(), err error) + +// TestConn tests that a net.Conn implementation properly satisfies the interface. +// The tests should not produce any false positives, but may experience +// false negatives. Thus, some issues may only be detected when the test is +// run multiple times. For maximal effectiveness, run the tests under the +// race detector. +func TestConn(t *testing.T, mp MakePipe) { + testConn(t, mp) +} + +type connTester func(t *testing.T, c1, c2 net.Conn) + +func timeoutWrapper(t *testing.T, mp MakePipe, f connTester) { + c1, c2, stop, err := mp() + if err != nil { + t.Fatalf("unable to make pipe: %v", err) + } + var once sync.Once + defer once.Do(func() { stop() }) + timer := time.AfterFunc(time.Minute, func() { + once.Do(func() { + t.Error("test timed out; terminating pipe") + stop() + }) + }) + defer timer.Stop() + f(t, c1, c2) +} + +// testBasicIO tests that the data sent on c1 is properly received on c2. +func testBasicIO(t *testing.T, c1, c2 net.Conn) { + want := make([]byte, 1<<20) + rand.New(rand.NewSource(0)).Read(want) + + dataCh := make(chan []byte) + go func() { + rd := bytes.NewReader(want) + if err := chunkedCopy(c1, rd); err != nil { + t.Errorf("unexpected c1.Write error: %v", err) + } + if err := c1.Close(); err != nil { + t.Errorf("unexpected c1.Close error: %v", err) + } + }() + + go func() { + wr := new(bytes.Buffer) + if err := chunkedCopy(wr, c2); err != nil { + t.Errorf("unexpected c2.Read error: %v", err) + } + if err := c2.Close(); err != nil { + t.Errorf("unexpected c2.Close error: %v", err) + } + dataCh <- wr.Bytes() + }() + + if got := <-dataCh; !bytes.Equal(got, want) { + t.Errorf("transmitted data differs") + } +} + +// testPingPong tests that the two endpoints can synchronously send data to +// each other in a typical request-response pattern. +func testPingPong(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + defer wg.Wait() + + pingPonger := func(c net.Conn) { + defer wg.Done() + buf := make([]byte, 8) + var prev uint64 + for { + if _, err := io.ReadFull(c, buf); err != nil { + if err == io.EOF { + break + } + t.Errorf("unexpected Read error: %v", err) + } + + v := binary.LittleEndian.Uint64(buf) + binary.LittleEndian.PutUint64(buf, v+1) + if prev != 0 && prev+2 != v { + t.Errorf("mismatching value: got %d, want %d", v, prev+2) + } + prev = v + if v == 1000 { + break + } + + if _, err := c.Write(buf); err != nil { + t.Errorf("unexpected Write error: %v", err) + break + } + } + if err := c.Close(); err != nil { + t.Errorf("unexpected Close error: %v", err) + } + } + + wg.Add(2) + go pingPonger(c1) + go pingPonger(c2) + + // Start off the chain reaction. + if _, err := c1.Write(make([]byte, 8)); err != nil { + t.Errorf("unexpected c1.Write error: %v", err) + } +} + +// testRacyRead tests that it is safe to mutate the input Read buffer +// immediately after cancelation has occurred. 
+func testRacyRead(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, rand.New(rand.NewSource(0))) + + var wg sync.WaitGroup + defer wg.Wait() + + c1.SetReadDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := c1.Read(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + checkForTimeoutError(t, err) + c1.SetReadDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// testRacyWrite tests that it is safe to mutate the input Write buffer +// immediately after cancelation has occurred. +func testRacyWrite(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(ioutil.Discard, c2) + + var wg sync.WaitGroup + defer wg.Wait() + + c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := c1.Write(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + checkForTimeoutError(t, err) + c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// testReadTimeout tests that Read timeouts do not affect Write. +func testReadTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(ioutil.Discard, c2) + + c1.SetReadDeadline(aLongTimeAgo) + _, err := c1.Read(make([]byte, 1024)) + checkForTimeoutError(t, err) + if _, err := c1.Write(make([]byte, 1024)); err != nil { + t.Errorf("unexpected Write error: %v", err) + } +} + +// testWriteTimeout tests that Write timeouts do not affect Read. +func testWriteTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, rand.New(rand.NewSource(0))) + + c1.SetWriteDeadline(aLongTimeAgo) + _, err := c1.Write(make([]byte, 1024)) + checkForTimeoutError(t, err) + if _, err := c1.Read(make([]byte, 1024)); err != nil { + t.Errorf("unexpected Read error: %v", err) + } +} + +// testPastTimeout tests that a deadline set in the past immediately times out +// Read and Write requests. +func testPastTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, c2) + + testRoundtrip(t, c1) + + c1.SetDeadline(aLongTimeAgo) + n, err := c1.Write(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Write count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + n, err = c1.Read(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Read count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + + testRoundtrip(t, c1) +} + +// testPresentTimeout tests that a deadline set while there are pending +// Read and Write operations immediately times out those operations. 
+func testPresentTimeout(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(3) + + deadlineSet := make(chan bool, 1) + go func() { + defer wg.Done() + time.Sleep(100 * time.Millisecond) + deadlineSet <- true + c1.SetReadDeadline(aLongTimeAgo) + c1.SetWriteDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + n, err := c1.Read(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Read count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + if len(deadlineSet) == 0 { + t.Error("Read timed out before deadline is set") + } + }() + go func() { + defer wg.Done() + var err error + for err == nil { + _, err = c1.Write(make([]byte, 1024)) + } + checkForTimeoutError(t, err) + if len(deadlineSet) == 0 { + t.Error("Write timed out before deadline is set") + } + }() +} + +// testFutureTimeout tests that a future deadline will eventually time out +// Read and Write operations. +func testFutureTimeout(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + wg.Add(2) + + c1.SetDeadline(time.Now().Add(100 * time.Millisecond)) + go func() { + defer wg.Done() + _, err := c1.Read(make([]byte, 1024)) + checkForTimeoutError(t, err) + }() + go func() { + defer wg.Done() + var err error + for err == nil { + _, err = c1.Write(make([]byte, 1024)) + } + checkForTimeoutError(t, err) + }() + wg.Wait() + + go chunkedCopy(c2, c2) + resyncConn(t, c1) + testRoundtrip(t, c1) +} + +// testCloseTimeout tests that calling Close immediately times out pending +// Read and Write operations. +func testCloseTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, c2) + + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(3) + + // Test for cancelation upon connection closure. + c1.SetDeadline(neverTimeout) + go func() { + defer wg.Done() + time.Sleep(100 * time.Millisecond) + c1.Close() + }() + go func() { + defer wg.Done() + var err error + buf := make([]byte, 1024) + for err == nil { + _, err = c1.Read(buf) + } + }() + go func() { + defer wg.Done() + var err error + buf := make([]byte, 1024) + for err == nil { + _, err = c1.Write(buf) + } + }() +} + +// testConcurrentMethods tests that the methods of net.Conn can safely +// be called concurrently. +func testConcurrentMethods(t *testing.T, c1, c2 net.Conn) { + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; see https://golang.org/issue/20489") + } + go chunkedCopy(c2, c2) + + // The results of the calls may be nonsensical, but this should + // not trigger a race detector warning. + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(7) + go func() { + defer wg.Done() + c1.Read(make([]byte, 1024)) + }() + go func() { + defer wg.Done() + c1.Write(make([]byte, 1024)) + }() + go func() { + defer wg.Done() + c1.SetDeadline(time.Now().Add(10 * time.Millisecond)) + }() + go func() { + defer wg.Done() + c1.SetReadDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + c1.SetWriteDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + c1.LocalAddr() + }() + go func() { + defer wg.Done() + c1.RemoteAddr() + }() + } + wg.Wait() // At worst, the deadline is set 10ms into the future + + resyncConn(t, c1) + testRoundtrip(t, c1) +} + +// checkForTimeoutError checks that the error satisfies the Error interface +// and that Timeout returns true. 
+func checkForTimeoutError(t *testing.T, err error) { + if nerr, ok := err.(net.Error); ok { + if !nerr.Timeout() { + t.Errorf("err.Timeout() = false, want true") + } + } else { + t.Errorf("got %T, want net.Error", err) + } +} + +// testRoundtrip writes something into c and reads it back. +// It assumes that everything written into c is echoed back to itself. +func testRoundtrip(t *testing.T, c net.Conn) { + if err := c.SetDeadline(neverTimeout); err != nil { + t.Errorf("roundtrip SetDeadline error: %v", err) + } + + const s = "Hello, world!" + buf := []byte(s) + if _, err := c.Write(buf); err != nil { + t.Errorf("roundtrip Write error: %v", err) + } + if _, err := io.ReadFull(c, buf); err != nil { + t.Errorf("roundtrip Read error: %v", err) + } + if string(buf) != s { + t.Errorf("roundtrip data mismatch: got %q, want %q", buf, s) + } +} + +// resyncConn resynchronizes the connection into a sane state. +// It assumes that everything written into c is echoed back to itself. +// It assumes that 0xff is not currently on the wire or in the read buffer. +func resyncConn(t *testing.T, c net.Conn) { + c.SetDeadline(neverTimeout) + errCh := make(chan error) + go func() { + _, err := c.Write([]byte{0xff}) + errCh <- err + }() + buf := make([]byte, 1024) + for { + n, err := c.Read(buf) + if n > 0 && bytes.IndexByte(buf[:n], 0xff) == n-1 { + break + } + if err != nil { + t.Errorf("unexpected Read error: %v", err) + break + } + } + if err := <-errCh; err != nil { + t.Errorf("unexpected Write error: %v", err) + } +} + +// chunkedCopy copies from r to w in fixed-width chunks to avoid +// causing a Write that exceeds the maximum packet size for packet-based +// connections like "unixpacket". +// We assume that the maximum packet size is at least 1024. +func chunkedCopy(w io.Writer, r io.Reader) error { + b := make([]byte, 1024) + _, err := io.CopyBuffer(struct{ io.Writer }{w}, struct{ io.Reader }{r}, b) + return err +} diff --git a/vendor/golang.org/x/net/nettest/conntest_go16.go b/vendor/golang.org/x/net/nettest/conntest_go16.go new file mode 100644 index 000000000..4cbf48e35 --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest_go16.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package nettest + +import "testing" + +func testConn(t *testing.T, mp MakePipe) { + // Avoid using subtests on Go 1.6 and below. + timeoutWrapper(t, mp, testBasicIO) + timeoutWrapper(t, mp, testPingPong) + timeoutWrapper(t, mp, testRacyRead) + timeoutWrapper(t, mp, testRacyWrite) + timeoutWrapper(t, mp, testReadTimeout) + timeoutWrapper(t, mp, testWriteTimeout) + timeoutWrapper(t, mp, testPastTimeout) + timeoutWrapper(t, mp, testPresentTimeout) + timeoutWrapper(t, mp, testFutureTimeout) + timeoutWrapper(t, mp, testCloseTimeout) + timeoutWrapper(t, mp, testConcurrentMethods) +} diff --git a/vendor/golang.org/x/net/nettest/conntest_go17.go b/vendor/golang.org/x/net/nettest/conntest_go17.go new file mode 100644 index 000000000..fa039f03f --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest_go17.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package nettest + +import "testing" + +func testConn(t *testing.T, mp MakePipe) { + // Use subtests on Go 1.7 and above since it is better organized. 
+ t.Run("BasicIO", func(t *testing.T) { timeoutWrapper(t, mp, testBasicIO) }) + t.Run("PingPong", func(t *testing.T) { timeoutWrapper(t, mp, testPingPong) }) + t.Run("RacyRead", func(t *testing.T) { timeoutWrapper(t, mp, testRacyRead) }) + t.Run("RacyWrite", func(t *testing.T) { timeoutWrapper(t, mp, testRacyWrite) }) + t.Run("ReadTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testReadTimeout) }) + t.Run("WriteTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testWriteTimeout) }) + t.Run("PastTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPastTimeout) }) + t.Run("PresentTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPresentTimeout) }) + t.Run("FutureTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testFutureTimeout) }) + t.Run("CloseTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testCloseTimeout) }) + t.Run("ConcurrentMethods", func(t *testing.T) { timeoutWrapper(t, mp, testConcurrentMethods) }) +} diff --git a/vendor/golang.org/x/net/nettest/conntest_test.go b/vendor/golang.org/x/net/nettest/conntest_test.go new file mode 100644 index 000000000..9f9453fb5 --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest_test.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package nettest + +import ( + "net" + "os" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" +) + +func TestTestConn(t *testing.T) { + tests := []struct{ name, network string }{ + {"TCP", "tcp"}, + {"UnixPipe", "unix"}, + {"UnixPacketPipe", "unixpacket"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if !nettest.TestableNetwork(tt.network) { + t.Skipf("not supported on %s", runtime.GOOS) + } + + mp := func() (c1, c2 net.Conn, stop func(), err error) { + ln, err := nettest.NewLocalListener(tt.network) + if err != nil { + return nil, nil, nil, err + } + + // Start a connection between two endpoints. + var err1, err2 error + done := make(chan bool) + go func() { + c2, err2 = ln.Accept() + close(done) + }() + c1, err1 = net.Dial(ln.Addr().Network(), ln.Addr().String()) + <-done + + stop = func() { + if err1 == nil { + c1.Close() + } + if err2 == nil { + c2.Close() + } + ln.Close() + switch tt.network { + case "unix", "unixpacket": + os.Remove(ln.Addr().String()) + } + } + + switch { + case err1 != nil: + stop() + return nil, nil, nil, err1 + case err2 != nil: + stop() + return nil, nil, nil, err2 + default: + return c1, c2, stop, nil + } + } + + TestConn(t, mp) + }) + } +} diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go new file mode 100644 index 000000000..56f43bf65 --- /dev/null +++ b/vendor/golang.org/x/net/netutil/listen.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package netutil provides network utility functions, complementing the more +// common ones in the net package. +package netutil // import "golang.org/x/net/netutil" + +import ( + "net" + "sync" +) + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. 
+func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{l, make(chan struct{}, n)} +} + +type limitListener struct { + net.Listener + sem chan struct{} +} + +func (l *limitListener) acquire() { l.sem <- struct{}{} } +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + l.acquire() + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} diff --git a/vendor/golang.org/x/net/netutil/listen_test.go b/vendor/golang.org/x/net/netutil/listen_test.go new file mode 100644 index 000000000..5e07d7bea --- /dev/null +++ b/vendor/golang.org/x/net/netutil/listen_test.go @@ -0,0 +1,101 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package netutil + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/internal/nettest" +) + +func TestLimitListener(t *testing.T) { + const max = 5 + attempts := (nettest.MaxOpenFiles() - max) / 2 + if attempts > 256 { // maximum length of accept queue is 128 by default + attempts = 256 + } + + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + l = LimitListener(l, max) + + var open int32 + go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if n := atomic.AddInt32(&open, 1); n > max { + t.Errorf("%d open connections, want <= %d", n, max) + } + defer atomic.AddInt32(&open, -1) + time.Sleep(10 * time.Millisecond) + fmt.Fprint(w, "some body") + })) + + var wg sync.WaitGroup + var failed int32 + for i := 0; i < attempts; i++ { + wg.Add(1) + go func() { + defer wg.Done() + c := http.Client{Timeout: 3 * time.Second} + r, err := c.Get("http://" + l.Addr().String()) + if err != nil { + t.Log(err) + atomic.AddInt32(&failed, 1) + return + } + defer r.Body.Close() + io.Copy(ioutil.Discard, r.Body) + }() + } + wg.Wait() + + // We expect some Gets to fail as the kernel's accept queue is filled, + // but most should succeed. + if int(failed) >= attempts/2 { + t.Errorf("%d requests failed within %d attempts", failed, attempts) + } +} + +type errorListener struct { + net.Listener +} + +func (errorListener) Accept() (net.Conn, error) { + return nil, errFake +} + +var errFake = errors.New("fake error from errorListener") + +// This used to hang. +func TestLimitListenerError(t *testing.T) { + donec := make(chan bool, 1) + go func() { + const n = 2 + ll := LimitListener(errorListener{}, n) + for i := 0; i < n+1; i++ { + _, err := ll.Accept() + if err != errFake { + t.Fatalf("Accept error = %v; want errFake", err) + } + } + donec <- true + }() + select { + case <-donec: + case <-time.After(5 * time.Second): + t.Fatal("timeout. deadlock?") + } +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 000000000..4c5ad88b1 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,18 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proxy + +import ( + "net" +) + +type direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var Direct = direct{} + +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 000000000..0689bb6a7 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,140 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. 
A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/per_host_test.go b/vendor/golang.org/x/net/proxy/per_host_test.go new file mode 100644 index 000000000..a7d809571 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host_test.go @@ -0,0 +1,55 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "errors" + "net" + "reflect" + "testing" +) + +type recordingProxy struct { + addrs []string +} + +func (r *recordingProxy) Dial(network, addr string) (net.Conn, error) { + r.addrs = append(r.addrs, addr) + return nil, errors.New("recordingProxy") +} + +func TestPerHost(t *testing.T) { + var def, bypass recordingProxy + perHost := NewPerHost(&def, &bypass) + perHost.AddFromString("localhost,*.zone,127.0.0.1,10.0.0.1/8,1000::/16") + + expectedDef := []string{ + "example.com:123", + "1.2.3.4:123", + "[1001::]:123", + } + expectedBypass := []string{ + "localhost:123", + "zone:123", + "foo.zone:123", + "127.0.0.1:123", + "10.1.2.3:123", + "[1000::]:123", + } + + for _, addr := range expectedDef { + perHost.Dial("tcp", addr) + } + for _, addr := range expectedBypass { + perHost.Dial("tcp", addr) + } + + if !reflect.DeepEqual(expectedDef, def.addrs) { + t.Errorf("Hosts which went to the default proxy didn't match. Got %v, want %v", def.addrs, expectedDef) + } + if !reflect.DeepEqual(expectedBypass, bypass.addrs) { + t.Errorf("Hosts which went to the bypass proxy didn't match. Got %v, want %v", bypass.addrs, expectedBypass) + } +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 000000000..553ead7cf --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,134 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" + "sync" +) + +// A Dialer is a means to establish a connection. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. 
+func FromEnvironment() Dialer { + allProxy := allProxyEnv.Get() + if len(allProxy) == 0 { + return Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return Direct + } + proxy, err := FromURL(proxyURL, Direct) + if err != nil { + return Direct + } + + noProxy := noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/golang.org/x/net/proxy/proxy_test.go b/vendor/golang.org/x/net/proxy/proxy_test.go new file mode 100644 index 000000000..0f31e211c --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy_test.go @@ -0,0 +1,215 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proxy + +import ( + "bytes" + "fmt" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" + "testing" +) + +type proxyFromEnvTest struct { + allProxyEnv string + noProxyEnv string + wantTypeOf Dialer +} + +func (t proxyFromEnvTest) String() string { + var buf bytes.Buffer + space := func() { + if buf.Len() > 0 { + buf.WriteByte(' ') + } + } + if t.allProxyEnv != "" { + fmt.Fprintf(&buf, "all_proxy=%q", t.allProxyEnv) + } + if t.noProxyEnv != "" { + space() + fmt.Fprintf(&buf, "no_proxy=%q", t.noProxyEnv) + } + return strings.TrimSpace(buf.String()) +} + +func TestFromEnvironment(t *testing.T) { + ResetProxyEnv() + + type dummyDialer struct { + direct + } + + RegisterDialerType("irc", func(_ *url.URL, _ Dialer) (Dialer, error) { + return dummyDialer{}, nil + }) + + proxyFromEnvTests := []proxyFromEnvTest{ + {allProxyEnv: "127.0.0.1:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {allProxyEnv: "ftp://example.com:8000", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {allProxyEnv: "socks5://example.com:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: &PerHost{}}, + {allProxyEnv: "irc://example.com:8000", wantTypeOf: dummyDialer{}}, + {noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {wantTypeOf: direct{}}, + } + + for _, tt := range proxyFromEnvTests { + os.Setenv("ALL_PROXY", tt.allProxyEnv) + os.Setenv("NO_PROXY", tt.noProxyEnv) + ResetCachedEnvironment() + + d := FromEnvironment() + if got, want := fmt.Sprintf("%T", d), fmt.Sprintf("%T", tt.wantTypeOf); got != want { + t.Errorf("%v: got type = %T, want %T", tt, d, tt.wantTypeOf) + } + } +} + +func TestFromURL(t *testing.T) { + endSystem, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer endSystem.Close() + gateway, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer gateway.Close() + + var wg sync.WaitGroup + wg.Add(1) + go socks5Gateway(t, gateway, endSystem, socks5Domain, &wg) + + url, err := url.Parse("socks5://user:password@" + gateway.Addr().String()) + if err != nil { + t.Fatalf("url.Parse failed: %v", err) + } + proxy, err := FromURL(url, Direct) + if err != nil { + t.Fatalf("FromURL failed: %v", err) + } + _, port, err := net.SplitHostPort(endSystem.Addr().String()) + if err != nil { + t.Fatalf("net.SplitHostPort failed: %v", err) + } + if c, err := proxy.Dial("tcp", "localhost:"+port); err != nil { + t.Fatalf("FromURL.Dial failed: %v", err) + } else { + c.Close() + } + + wg.Wait() +} + +func TestSOCKS5(t *testing.T) { + endSystem, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer endSystem.Close() + gateway, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer gateway.Close() + + var wg sync.WaitGroup + wg.Add(1) + go socks5Gateway(t, gateway, endSystem, socks5IP4, &wg) + + proxy, err := SOCKS5("tcp", gateway.Addr().String(), nil, Direct) + if err != nil { + t.Fatalf("SOCKS5 failed: %v", err) + } + if c, err := proxy.Dial("tcp", endSystem.Addr().String()); err != nil { + t.Fatalf("SOCKS5.Dial failed: %v", err) + } else { + c.Close() + } + + wg.Wait() +} + +func socks5Gateway(t *testing.T, gateway, endSystem net.Listener, typ byte, wg *sync.WaitGroup) { + defer wg.Done() + + c, err := gateway.Accept() + if err != nil { + t.Errorf("net.Listener.Accept failed: %v", err) + return + } + defer c.Close() + + b := make([]byte, 32) 
+ var n int + if typ == socks5Domain { + n = 4 + } else { + n = 3 + } + if _, err := io.ReadFull(c, b[:n]); err != nil { + t.Errorf("io.ReadFull failed: %v", err) + return + } + if _, err := c.Write([]byte{socks5Version, socks5AuthNone}); err != nil { + t.Errorf("net.Conn.Write failed: %v", err) + return + } + if typ == socks5Domain { + n = 16 + } else { + n = 10 + } + if _, err := io.ReadFull(c, b[:n]); err != nil { + t.Errorf("io.ReadFull failed: %v", err) + return + } + if b[0] != socks5Version || b[1] != socks5Connect || b[2] != 0x00 || b[3] != typ { + t.Errorf("got an unexpected packet: %#02x %#02x %#02x %#02x", b[0], b[1], b[2], b[3]) + return + } + if typ == socks5Domain { + copy(b[:5], []byte{socks5Version, 0x00, 0x00, socks5Domain, 9}) + b = append(b, []byte("localhost")...) + } else { + copy(b[:4], []byte{socks5Version, 0x00, 0x00, socks5IP4}) + } + host, port, err := net.SplitHostPort(endSystem.Addr().String()) + if err != nil { + t.Errorf("net.SplitHostPort failed: %v", err) + return + } + b = append(b, []byte(net.ParseIP(host).To4())...) + p, err := strconv.Atoi(port) + if err != nil { + t.Errorf("strconv.Atoi failed: %v", err) + return + } + b = append(b, []byte{byte(p >> 8), byte(p)}...) + if _, err := c.Write(b); err != nil { + t.Errorf("net.Conn.Write failed: %v", err) + return + } +} + +func ResetProxyEnv() { + for _, env := range []*envOnce{allProxyEnv, noProxyEnv} { + for _, v := range env.names { + os.Setenv(v, "") + } + } + ResetCachedEnvironment() +} + +func ResetCachedEnvironment() { + allProxyEnv.reset() + noProxyEnv.reset() +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 000000000..3fed38ef1 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,214 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "errors" + "io" + "net" + "strconv" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. +func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) { + s := &socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type socks5 struct { + user, password string + network, addr string + forward Dialer +} + +const socks5Version = 5 + +const ( + socks5AuthNone = 0 + socks5AuthPassword = 2 +) + +const socks5Connect = 1 + +const ( + socks5IP4 = 1 + socks5Domain = 3 + socks5IP6 = 4 +) + +var socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. 
+func (s *socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, socks5IP4) + ip = ip4 + } else { + buf = append(buf, socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) 
+ } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(socks5Errors) { + failure = socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case socks5IP4: + bytesToDiscard = net.IPv4len + case socks5IP6: + bytesToDiscard = net.IPv6len + case socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go new file mode 100644 index 000000000..f85a3c32b --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/gen.go @@ -0,0 +1,713 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This program generates table.go and table_test.go based on the authoritative +// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat +// +// The version is derived from +// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat +// and a human-readable form is at +// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat +// +// To fetch a particular git revision, such as 5c70ccd250, pass +// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" +// and -version "an explicit version string". + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "regexp" + "sort" + "strings" + + "golang.org/x/net/idna" +) + +const ( + // These sum of these four values must be no greater than 32. + nodesBitsChildren = 10 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + // These sum of these four values must be no greater than 32. 
+ childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +var ( + maxChildren int + maxTextOffset int + maxTextLength int + maxHi uint32 + maxLo uint32 +) + +func max(a, b int) int { + if a < b { + return b + } + return a +} + +func u32max(a, b uint32) uint32 { + if a < b { + return b + } + return a +} + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 + numNodeType = 3 +) + +func nodeTypeStr(n int) string { + switch n { + case nodeTypeNormal: + return "+" + case nodeTypeException: + return "!" + case nodeTypeParentOnly: + return "o" + } + panic("unreachable") +} + +const ( + defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" + gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" +) + +var ( + labelEncoding = map[string]uint32{} + labelsList = []string{} + labelsMap = map[string]bool{} + rules = []string{} + + // validSuffixRE is used to check that the entries in the public suffix + // list are in canonical form (after Punycode encoding). Specifically, + // capital letters are not allowed. + validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) + + shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) + dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) + + comments = flag.Bool("comments", false, "generate table.go comments, for debugging") + subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") + url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. If empty, stdin is read instead") + v = flag.Bool("v", false, "verbose output (to stderr)") + version = flag.String("version", "", "the effective_tld_names.dat version") +) + +func main() { + if err := main1(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main1() error { + flag.Parse() + if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { + return fmt.Errorf("not enough bits to encode the nodes table") + } + if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { + return fmt.Errorf("not enough bits to encode the children table") + } + if *version == "" { + if *url != defaultURL { + return fmt.Errorf("-version was not specified, and the -url is not the default one") + } + sha, date, err := gitCommit() + if err != nil { + return err + } + *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) + } + var r io.Reader = os.Stdin + if *url != "" { + res, err := http.Get(*url) + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) + } + r = res.Body + defer res.Body.Close() + } + + var root node + icann := false + br := bufio.NewReader(r) + for { + s, err := br.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return err + } + s = strings.TrimSpace(s) + if strings.Contains(s, "BEGIN ICANN DOMAINS") { + icann = true + continue + } + if strings.Contains(s, "END ICANN DOMAINS") { + icann = false + continue + } + if s == "" || strings.HasPrefix(s, "//") { + continue + } + s, err = idna.ToASCII(s) + if err != nil { + return err + } + if !validSuffixRE.MatchString(s) { + return fmt.Errorf("bad publicsuffix.org list data: %q", s) + } + + if *subset { + switch { + case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): + case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): + case s == "ao" || strings.HasSuffix(s, ".ao"): 
+ case s == "ar" || strings.HasSuffix(s, ".ar"): + case s == "arpa" || strings.HasSuffix(s, ".arpa"): + case s == "cy" || strings.HasSuffix(s, ".cy"): + case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): + case s == "jp": + case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): + case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): + case s == "om" || strings.HasSuffix(s, ".om"): + case s == "uk" || strings.HasSuffix(s, ".uk"): + case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): + case s == "tw" || strings.HasSuffix(s, ".tw"): + case s == "zw" || strings.HasSuffix(s, ".zw"): + case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): + // xn--p1ai is Russian-Cyrillic "рф". + default: + continue + } + } + + rules = append(rules, s) + + nt, wildcard := nodeTypeNormal, false + switch { + case strings.HasPrefix(s, "*."): + s, nt = s[2:], nodeTypeParentOnly + wildcard = true + case strings.HasPrefix(s, "!"): + s, nt = s[1:], nodeTypeException + } + labels := strings.Split(s, ".") + for n, i := &root, len(labels)-1; i >= 0; i-- { + label := labels[i] + n = n.child(label) + if i == 0 { + if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { + n.nodeType = nt + } + n.icann = n.icann && icann + n.wildcard = n.wildcard || wildcard + } + labelsMap[label] = true + } + } + labelsList = make([]string, 0, len(labelsMap)) + for label := range labelsMap { + labelsList = append(labelsList, label) + } + sort.Strings(labelsList) + + if err := generate(printReal, &root, "table.go"); err != nil { + return err + } + if err := generate(printTest, &root, "table_test.go"); err != nil { + return err + } + return nil +} + +func generate(p func(io.Writer, *node) error, root *node, filename string) error { + buf := new(bytes.Buffer) + if err := p(buf, root); err != nil { + return err + } + b, err := format.Source(buf.Bytes()) + if err != nil { + return err + } + return ioutil.WriteFile(filename, b, 0644) +} + +func gitCommit() (sha, date string, retErr error) { + res, err := http.Get(gitCommitURL) + if err != nil { + return "", "", err + } + if res.StatusCode != http.StatusOK { + return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) + } + defer res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if m := shaRE.FindSubmatch(b); m != nil { + sha = string(m[1]) + } + if m := dateRE.FindSubmatch(b); m != nil { + date = string(m[1]) + } + if sha == "" || date == "" { + retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) + } + return sha, date, retErr +} + +func printTest(w io.Writer, n *node) error { + fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") + fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") + for _, rule := range rules { + fmt.Fprintf(w, "%q,\n", rule) + } + fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") + if err := n.walk(w, printNodeLabel); err != nil { + return err + } + fmt.Fprintf(w, "}\n") + return nil +} + +func printReal(w io.Writer, n *node) error { + const header = `// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = %q + +const ( + nodesBitsChildren = %d + nodesBitsICANN = %d + nodesBitsTextOffset = %d + nodesBitsTextLength = %d + + childrenBitsWildcard = %d + childrenBitsNodeType = %d + childrenBitsHi = %d + childrenBitsLo = %d +) + +const ( + nodeTypeNormal = %d + nodeTypeException = %d + nodeTypeParentOnly = %d +) + +// numTLD is the number of top level domains. 
+const numTLD = %d + +` + fmt.Fprintf(w, header, *version, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, + nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) + + text := combineText(labelsList) + if text == "" { + return fmt.Errorf("internal error: makeText returned no text") + } + for _, label := range labelsList { + offset, length := strings.Index(text, label), len(label) + if offset < 0 { + return fmt.Errorf("internal error: could not find %q in text %q", label, text) + } + maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) + if offset >= 1<= 1< 64 { + n, plus = 64, " +" + } + fmt.Fprintf(w, "%q%s\n", text[:n], plus) + text = text[n:] + } + + if err := n.walk(w, assignIndexes); err != nil { + return err + } + + fmt.Fprintf(w, ` + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] children index +// [%2d bits] ICANN bit +// [%2d bits] text index +// [%2d bits] text length +var nodes = [...]uint32{ +`, + 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) + if err := n.walk(w, printNode); err != nil { + return err + } + fmt.Fprintf(w, `} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] wildcard bit +// [%2d bits] node type +// [%2d bits] high nodes index (exclusive) of children +// [%2d bits] low nodes index (inclusive) of children +var children=[...]uint32{ +`, + 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) + for i, c := range childrenEncoding { + s := "---------------" + lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 + if *comments { + fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", + c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) + } else { + fmt.Fprintf(w, "0x%x,\n", c) + } + } + fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { + ss = ss[1:] + } + return ss +} + +// crush combines a list of strings, taking advantage of overlaps. It returns a +// single string that contains each input string as a substring. 
+func crush(ss []string) string { + maxLabelLen := 0 + for _, s := range ss { + if maxLabelLen < len(s) { + maxLabelLen = len(s) + } + } + + for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { + prefixes := makePrefixMap(ss, prefixLen) + for i, s := range ss { + if len(s) <= prefixLen { + continue + } + mergeLabel(ss, i, prefixLen, prefixes) + } + } + + return strings.Join(ss, "") +} + +// mergeLabel merges the label at ss[i] with the first available matching label +// in prefixMap, where the last "prefixLen" characters in ss[i] match the first +// "prefixLen" characters in the matching label. +// It will merge ss[i] repeatedly until no more matches are available. +// All matching labels merged into ss[i] are replaced by "". +func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { + s := ss[i] + suffix := s[len(s)-prefixLen:] + for _, j := range prefixes[suffix] { + // Empty strings mean "already used." Also avoid merging with self. + if ss[j] == "" || i == j { + continue + } + if *v { + fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", + prefixLen, i, j, ss[i], ss[j], suffix) + } + ss[i] += ss[j][prefixLen:] + ss[j] = "" + // ss[i] has a new suffix, so merge again if possible. + // Note: we only have to merge again at the same prefix length. Shorter + // prefix lengths will be handled in the next iteration of crush's for loop. + // Can there be matches for longer prefix lengths, introduced by the merge? + // I believe that any such matches would by necessity have been eliminated + // during substring removal or merged at a higher prefix length. For + // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" + // would yield "abcde", which could be merged with "bcdef." However, in + // practice "cde" would already have been elimintated by removeSubstrings. + mergeLabel(ss, i, prefixLen, prefixes) + return + } +} + +// prefixMap maps from a prefix to a list of strings containing that prefix. The +// list of strings is represented as indexes into a slice of strings stored +// elsewhere. +type prefixMap map[string][]int + +// makePrefixMap constructs a prefixMap from a slice of strings. +func makePrefixMap(ss []string, prefixLen int) prefixMap { + prefixes := make(prefixMap) + for i, s := range ss { + // We use < rather than <= because if a label matches on a prefix equal to + // its full length, that's actually a substring match handled by + // removeSubstrings. + if prefixLen < len(s) { + prefix := s[:prefixLen] + prefixes[prefix] = append(prefixes[prefix], i) + } + } + + return prefixes +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 000000000..8bbf3bcd7 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,135 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// http://publicsuffix.org/. A public suffix is one under which Internet users +// can directly register names. +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. 
+ +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is privately +// managed. For example, foo.org and foo.co.uk are ICANN domains, +// foo.dyndns.org and foo.blogspot.co.uk are private domains. +// +// Use cases for distinguishing ICANN domains like foo.com from private +// domains like foo.appspot.com can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + lo, hi := uint32(0), uint32(numTLD) + s, suffix, wildcard := domain, len(domain), false +loop: + for { + dot := strings.LastIndex(s, ".") + if wildcard { + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) + icann = u&(1<>= nodesBitsICANN + u = children[u&(1<>= childrenBitsLo + hi = u & (1<>= childrenBitsHi + switch u & (1<>= childrenBitsNodeType + wildcard = u&(1<>= nodesBitsTextLength + offset := x & (1< len(b[j]) +} + +// eTLDPlusOneTestCases come from +// https://github.com/publicsuffix/list/blob/master/tests/test_psl.txt +var eTLDPlusOneTestCases = []struct { + domain, want string +}{ + // Empty input. + {"", ""}, + // Unlisted TLD. + {"example", ""}, + {"example.example", "example.example"}, + {"b.example.example", "example.example"}, + {"a.b.example.example", "example.example"}, + // TLD with only 1 rule. + {"biz", ""}, + {"domain.biz", "domain.biz"}, + {"b.domain.biz", "domain.biz"}, + {"a.b.domain.biz", "domain.biz"}, + // TLD with some 2-level rules. + {"com", ""}, + {"example.com", "example.com"}, + {"b.example.com", "example.com"}, + {"a.b.example.com", "example.com"}, + {"uk.com", ""}, + {"example.uk.com", "example.uk.com"}, + {"b.example.uk.com", "example.uk.com"}, + {"a.b.example.uk.com", "example.uk.com"}, + {"test.ac", "test.ac"}, + // TLD with only 1 (wildcard) rule. + {"mm", ""}, + {"c.mm", ""}, + {"b.c.mm", "b.c.mm"}, + {"a.b.c.mm", "b.c.mm"}, + // More complex TLD. + {"jp", ""}, + {"test.jp", "test.jp"}, + {"www.test.jp", "test.jp"}, + {"ac.jp", ""}, + {"test.ac.jp", "test.ac.jp"}, + {"www.test.ac.jp", "test.ac.jp"}, + {"kyoto.jp", ""}, + {"test.kyoto.jp", "test.kyoto.jp"}, + {"ide.kyoto.jp", ""}, + {"b.ide.kyoto.jp", "b.ide.kyoto.jp"}, + {"a.b.ide.kyoto.jp", "b.ide.kyoto.jp"}, + {"c.kobe.jp", ""}, + {"b.c.kobe.jp", "b.c.kobe.jp"}, + {"a.b.c.kobe.jp", "b.c.kobe.jp"}, + {"city.kobe.jp", "city.kobe.jp"}, + {"www.city.kobe.jp", "city.kobe.jp"}, + // TLD with a wildcard rule and exceptions. + {"ck", ""}, + {"test.ck", ""}, + {"b.test.ck", "b.test.ck"}, + {"a.b.test.ck", "b.test.ck"}, + {"www.ck", "www.ck"}, + {"www.www.ck", "www.ck"}, + // US K12. 
+ {"us", ""}, + {"test.us", "test.us"}, + {"www.test.us", "test.us"}, + {"ak.us", ""}, + {"test.ak.us", "test.ak.us"}, + {"www.test.ak.us", "test.ak.us"}, + {"k12.ak.us", ""}, + {"test.k12.ak.us", "test.k12.ak.us"}, + {"www.test.k12.ak.us", "test.k12.ak.us"}, + // Punycoded IDN labels + {"xn--85x722f.com.cn", "xn--85x722f.com.cn"}, + {"xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, + {"www.xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, + {"shishi.xn--55qx5d.cn", "shishi.xn--55qx5d.cn"}, + {"xn--55qx5d.cn", ""}, + {"xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, + {"www.xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, + {"shishi.xn--fiqs8s", "shishi.xn--fiqs8s"}, + {"xn--fiqs8s", ""}, +} + +func TestEffectiveTLDPlusOne(t *testing.T) { + for _, tc := range eTLDPlusOneTestCases { + got, _ := EffectiveTLDPlusOne(tc.domain) + if got != tc.want { + t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want) + } + } +} diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go new file mode 100644 index 000000000..a870b36cd --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/table.go @@ -0,0 +1,9534 @@ +// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = "publicsuffix.org's public_suffix_list.dat, git revision 0f3b07d9aab6d6c9fe74990af98316468d40f488 (2018-01-25T09:22:16Z)" + +const ( + nodesBitsChildren = 10 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 +) + +// numTLD is the number of top level domains. +const numTLD = 1551 + +// Text is the combined text of all labels. 
+const text = "0emmafann-arboretumbriamallamaceiobihirosakikamijimatsuzaki234li" + + "ma-cityeatselinogradult3l3p0rtargets-itargivestbytomaritimekeepi" + + "ng120009guacuiababia-goracleaningroks-theatreeastcoastaldefencea" + + "tonsbergjemnes3-ap-northeast-1337bilbaogashimadachicagoboats3-we" + + "bsite-us-east-1billustrationikonanporovnopocznoppdalindesnes3-we" + + "bsite-us-west-1biobirdartcenterprisesakimobetsuitainairforcechir" + + "ealminamiechizeninohekinannestadiybirkenesoddtangenovaranzanpach" + + "igasakievennodesaarlandnpanasonicateringebuilderschmidtre-gaulda" + + "livornobirthplacebitballooningladefinimakanegasakinkobayashikaoi" + + "rminamifuranobjarkoybjerkreimbananarepublicasadelamonedatingjesd" + + "alimitediscountysvardolls3-eu-west-3utilitiesquare7bjugninomiyak" + + "onojorpelandrangedalombardiamonds3-website-us-west-2blancomedica" + + "ltanissettaipeiheijinuyamashinatsukigatakasagotpantheonsitebloom" + + "bergbauernuorochesterbloxcms5ybluedancebmoattachmentsakyotanabel" + + "lunord-aurdalvdalcesalangenirasakinvestmentsalondonetskarmoybmsa" + + "ltdalombardynamisches-dnsaludray-dnsupdaternopilawawebspacebmwed" + + "dinglassassinationalheritagebnpparibaselburgleezebnrwedeployboml" + + "oansalvadordalibabalsanagochihayaakasakawaharaholtalenvironmenta" + + "lconservationishiazainzais-a-candidatebondrayddnsfreebox-osascol" + + "i-picenordre-landraydnsalzburgliwicebonnishigobookinglobalashovh" + + "achinohedmarkarpaczeladzparaglidingloboavistaprintelligencebooml" + + "adbrokesamegawabootsamnangerboschaefflerdalwaysdatabaseballangen" + + "oamishirasatochigiessensiositelekommunikationishiharabostikaruiz" + + "awabostonakijinsekikogentinglogowegroweibolognagasukebotanicalga" + + "rdenishiizunazukis-a-catererbotanicgardenishikatakayamatsushigeb" + + "otanybouncemerckmsdnipropetrovskjervoyagebounty-fullensakerrypro" + + "pertiesampagespeedmobilizeroboutiquebecatholicaxiascolipicenodum" + + "inamiiselectjomemorialomzaporizhzheguris-a-celticsfanishikatsura" + + "git-repostfoldnavybozentsujiiebplacedekagaminord-odalondrinaples" + + "amsclubindalorenskogloppenzaolbia-tempio-olbiatempioolbialystokk" + + "embuchikumagayagawakuyabukihokumakogenglandrivelandrobaknoluokta" + + "chikawakkanaibetsubamericanfamilydscloudcontrolappspotagerbrandy" + + "winevalleybrasiliabrindisibenikebristoloseyouripirangapartmentsa" + + "msungmbhartiffanybritishcolumbialowiezachpomorskienishikawazukam" + + "itsuebroadcastlefrakkestadrudunsandvikcoromantovalle-d-aostathel" + + "lebroadwaybroke-itjxjavald-aostaplesanfranciscofreakunemurorange" + + "iseiyoichippubetsubetsugarugbyengerdalaskanittedallasalleasingle" + + "surancertmgretagajobojis-a-chefarmsteadupontariodejaneirodoybrok" + + "erbronnoysundurbanamexnetlifyis-a-conservativefsnillfjordurhambu" + + "rgminakamichiharabrothermesaverdeatnurembergmodellingmxn--0trq7p" + + "7nnishimerabrowsersafetymarketsangobrumunddalotenkawabrunelastic" + + "beanstalkarumaifarsundyndns-at-workinggrouparisor-fronishinomiya" + + "shironobrusselsanjotkmaxxn--11b4c3dyndns-blogdnsannanishinoomote" + + "gobruxellesannohelplfinancialottebryanskleppgafanquannefrankfurt" + + "ksatxn--12c1fe0bradescorporationishinoshimatsuurabrynewjerseybus" + + "kerudinewportlligatmparliamentoyosatoyonakagyokutoyokawabuzenish" + + "iokoppegardyndns-freeboxoslodingenishitosashimizunaminamibosognd" + + "alottokorozawabuzzweirbwfashionishiwakis-a-cpadualstackspace-to-" + + "rentalstomakomaibarabzhitomirumalatvuopmicrolightingrimstadyndns" + + 
"-homednsanokasaokaminokawanishiaizubangecommunitysnesardegnaroyc" + + "omobaracomparemarkerryhotelsardiniacompute-1computerhistoryofsci" + + "ence-fictioncomsecuritytacticsarlutskashiwazakiyosemitecondoshic" + + "hinohealth-carereformitakeharaconferenceconstructionconsuladohar" + + "uovatrani-andria-barletta-trani-andriaconsultanthropologyconsult" + + "ingvolluxembourgruecontactraniandriabarlettatraniandriacontagema" + + "tsubaracontemporaryarteducationalchikugojomedio-campidano-medioc" + + "ampidanomediocontractorskenconventureshinodearthdfcbankasukabedz" + + "in-the-bandaioiraseeklogest-mon-blogueurovisionionjukudoyamainte" + + "nancebetsuikidsmynasushiobarackmazerbaijan-mayenebakkeshibechamb" + + "agriculturennebudapest-a-la-masionthewifiat-band-campaniacooking" + + "channelsdvrdnsdojoetsuwanouchikujogaszczytnordlandyndns-weberlin" + + "colncoolkuszkolahppiacenzagancooperativano-frankivskodjeffersonc" + + "openhagencyclopedichernivtsiciliacorsicagliaribeiraokinawashiros" + + "atochiokinoshimaizuruhrcorvettemasekasumigaurawa-mazowszextraspa" + + "cekitagatajirissagamiharacosenzakopanerairguardiannakadomarinebr" + + "askaunjargalsaceocosidnsfor-better-thanawatchesarpsborguitarsaru" + + "futsunomiyawakasaikaitakoelncostumedizinhistorischesasayamacouch" + + "potatofriesasebofagecounciluxurycouponsaskatchewancoursesassaris" + + "-a-doctoraycq-acranbrookuwanalyticsaudacreditcardyndns-wikiracre" + + "ditunioncremonashgabadaddjaguarqhachiojiyahoooshikamaishimodatec" + + "rewhoswhokksundyndns-workisboringujoinvillewismillercricketrzync" + + "rimeast-kazakhstanangercrotonexus-3crownprovidercrsvparsauherady" + + "ndns1cruisesavannahgacryptonomichigangwoncuisinellair-traffic-co" + + "ntrolleyculturalcentertainmentranoycuneocupcakecuritibaghdadynns" + + "aves-the-whalessandria-trani-barletta-andriatranibarlettaandriac" + + "xn--12cfi8ixb8luzerncyberlevagangaviikanonjis-a-financialadvisor" + + "-aurdalvivanovodkamisatokashikiwakunigamiharufcfancymrussiacyona" + + "barulsandoycyoutheworkpccwiiheyakagefgushikamifuranorth-kazakhst" + + "anfhvalerfidonnakanotoddenfieldynvpnchernovtsykkylvenetogakushim" + + "otoganewyorkshirecipesaro-urbino-pesarourbinopesaromasvuotnakaiw" + + "amizawassamukawataricohdatsunanjoburgriwataraidyndns-iparmattele" + + "fonicapitalonewspaperfigueresinstagingxn--1ctwolominamatakkokami" + + "noyamaxunusualpersonfilateliafilegearfilminamimakis-a-geekaszuby" + + "finalfinancefineartscholarshipschoolfinlandyroyrvikingulenfinnoy" + + "firebaseappartis-a-greenfirenzefirestonefirmdaleirvikatowicefish" + + "ingolffanschulefitjarfitnessettlementransurlfjalerflesbergflickr" + + "agerotikakamigaharaflightschwarzgwangjuniperflirflogintohmalvika" + + "tsushikabeeldengeluidfloraflorencefloridavvesiidazaifudaigokasel" + + "jordfloripaderbornfloristanohatakahamamurogawaflorogerschweizflo" + + "wersciencecentersciencehistoryflynnhosting-clusterflynnhubarclay" + + "s3-sa-east-1fndfor-ourfor-someeresistancefor-theaterforexrothach" + + "irogatakamoriokalmykiaforgotdnscientistockholmestrandforli-cesen" + + "a-forlicesenaforlikescandynamic-dnscjohnsonforsaleitungsenforsan" + + "dasuoloftrapaniizafortalfortmissoulancashireggio-calabriafortwor" + + "thadanorthwesternmutualforuminamiminowafosnescotlandfotaruis-a-g" + + "urufoxfordebianfozorafredrikstadtvscrapper-sitefreeddnsgeekgalax" + + "yfreemasonryfreesitevadsochildrensgardenfreetlscrappingfreiburgf" + + "reightravelchannelfreseniuscountryestateofdelawarezzoologyfribou" + + "rgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-g" + + 
"iuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-gi" + + "uliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriuliv" + + "giuliafrlfroganscrysechirurgiens-dentistes-en-francefrognfroland" + + "from-akrehamnfrom-alfrom-arfrom-azfrom-canonoichinomiyakefrom-co" + + "dynaliasdaburfrom-ctravelersinsurancefrom-dchiryukyuragifuchungb" + + "ukharafrom-dedyn-ip24from-flanderservegame-serversicherungfrom-g" + + "ausdalfrom-higashiagatsumagoianiafrom-iafrom-idfrom-ilfrom-inche" + + "onfrom-kservehalflifestylefrom-kyowariasahikawafrom-lancasterfro" + + "m-mangonohejis-a-hard-workerfrom-mdfrom-meethnologyfrom-mifunefr" + + "om-mnfrom-modalenfrom-mservehttpartnerservehumourfrom-mtnfrom-nc" + + "hitachinakagawatchandclockashibatakashimarumorimachidafrom-ndfro" + + "m-nefrom-nh-servebbserveirchitosetogitsuliguriafrom-njaworznotog" + + "awafrom-nminamiogunicomcastresindeviceserveminecraftrdfrom-nv-in" + + "foodnetworkshoppingfrom-nyfrom-ohtawaramotoineppuboliviajessheim" + + "periafrom-oketohnoshooguyfrom-orfrom-padovaksdalfrom-pratohobby-" + + "sitexashorokanaiefrom-rivnefrom-schoenbrunnfrom-sdfrom-tnfrom-tx" + + "n--1lqs03nfrom-utazuerichardlillehammerfeste-ipartservemp3from-v" + + "al-daostavalleyfrom-vtrentino-a-adigefrom-wafrom-wielunnerfrom-w" + + "valled-aostatoilfrom-wyfrosinonefrostalowa-wolawafroyahikobeardu" + + "baiduckdnservep2partyfstavernfujiiderafujikawaguchikonefujiminok" + + "amoenairtelecitychyattorneyagawakeisenbahnfujinomiyadafujiokayam" + + "angyshlakasamatsudontexistmein-vigorgefujisatoshonairtrafficplex" + + "us-1fujisawafujishiroishidakabiratoridefensells-for-lesservepics" + + "ervequakefujitsurugashimaringatlantakaharufujixeroxn--1lqs71dfuj" + + "iyoshidafukayabeatservesarcasmatartanddesignfukuchiyamadafukudom" + + "inichocolatelevisionissedalouvreisenisshingugefukuis-a-hunterfuk" + + "umitsubishigakirovogradoyfukuokazakiryuohadselfipasadenaritakura" + + "shikis-a-knightpointtokamachintaifun-dnsaliasiafukuroishikarikat" + + "urindalfukusakisarazurewebsiteshikagamiishibukawafukuyamagatakah" + + "ashimamakishiwadafunabashiriuchinadafunagatakahatakaishimogosenf" + + "unahashikamiamakusatsumasendaisennangoodyearfundaciofuoiskujukur" + + "iyamaniwakuratextileksvikatsuyamarylandfuosskoczowildlifedorainf" + + "racloudcontrolledogawarabikomaezakirunore-og-uvdalfurnitureggio-" + + "emilia-romagnakatombetsumitakagiizefurubirafurudonostiaarpassage" + + "nservicesettsurgeonshalloffameloyalistjordalshalsenfurukawais-a-" + + "landscaperfusodegaurafussaikisofukushimannorfolkebiblelveruminam" + + "isanrikubetsupportrentino-aadigefutabayamaguchinomigawafutboldly" + + "goingnowhere-for-morenakatsugawafuttsurugiminamitanefuturecmseva" + + "stopolefuturehostingfuturemailingfvgfylkesbiblackfridayfyresdalh" + + "angoutsystemscloudfunctionsevenassisicilyhannanmokuizumodenakaya" + + "mapassenger-associationhannosegawahanyuzenhapmirhareidsbergenhar" + + "stadharvestcelebrationhasamarburghasaminami-alpssells-itrentino-" + + "altoadigehashbanghasudahasura-appatriahasvikazohatogayaitakanabe" + + "autysfjordhatoyamazakitakamiizumisanofidelityhatsukaichikaiseis-" + + "a-linux-useranishiaritabashijonawatehattfjelldalhayashimamotobun" + + "gotakadapliernewmexicoalhazuminobusellsyourhomegoodsewilliamhill" + + "hbodoes-itvedestrandhelsinkitakatakanezawahembygdsforbundhemnesh" + + "aris-a-llamarriottrentino-s-tirollagrigentomologyeonggiehtavuoat" + + "nagaivuotnagaokakyotambabydgoszczecinemaceratabusebastopologyeon" + + "gnamegawakayamadridhemsedalhepforgeherokussldheroyhgtvalledaosta" + + 
"vangerhigashichichibunkyonanaoshimageandsoundandvisionhigashihir" + + "oshimanehigashiizumozakitakyushuaiahigashikagawahigashikagurasoe" + + "dahigashikawakitaaikitamihamadahigashikurumeguromskoghigashimats" + + "ushimarcheapaviancargodaddyn-vpnplus-2higashimatsuyamakitaakitad" + + "aitoigawahigashimurayamamotorcyclesharpfizerhigashinarusembokuki" + + "tamotosumy-routerhigashinehigashiomihachimanaustdalhigashiosakas" + + "ayamanakakogawahigashishirakawamatakaokaluganskydivinghigashisum" + + "iyoshikawaminamiaikitanakagusukumodernhigashitsunoshiroomurahiga" + + "shiurausukitashiobarahigashiyamatokoriyamanashifteditchyouripgfo" + + "ggiahigashiyodogawahigashiyoshinogaris-a-musicianhiraizumisatoka" + + "izukamakurazakitaurayasudahirakatashinagawahiranais-a-nascarfanh" + + "irarahiratsukagawahirayaizuwakamatsubushikusakadogawahistorichou" + + "seshawaiijimaritimoduminamiyamashirokawanabelembetsukubankazunow" + + "tvallee-aosteroyhitachiomiyagildeskaliszhitachiotagoperauniteroi" + + "zumizakisosakitagawahitraeumtgeradellogliastradinghjartdalhjelme" + + "landholeckobierzyceholidayhomeipharmacienshellaspeziahomelinkddi" + + "elddanuorrikuzentakataiwanairlinedre-eikerhomelinuxn--1qqw23ahom" + + "eofficehomesecuritymacaparecidahomesecuritypchofunatoriginsurecr" + + "eationiyodogawahomesenseminehomeunixn--2m4a15ehondahoneywellbein" + + "gzonehongotembaixadahonjyoitakarazukameokameyamatotakadahorninda" + + "lhorseoullensvanguardhortendofinternet-dnshimojis-a-nurservebeer" + + "hospitalhoteleshimokawahotmailhoyangerhoylandetroitskypehumaniti" + + "eshimokitayamahurdalhurumajis-a-painteractivegarsheis-a-patsfanh" + + "yllestadhyogoris-a-personaltrainerhyugawarahyundaiwafunejewelryj" + + "ewishartgalleryjfkharkovanylvenicejgorajlcube-serverrankoshigaya" + + "kumoldelmenhorstalbanshinichinanjlljmphilatelyjnjcphiladelphiaar" + + "eadmyblogsitejoyentrentino-sued-tiroljoyokaichibalatinoipifonymi" + + "nanojpmorganjpnjprshinjournalismailillesandefjordjurkoshunantank" + + "hmelnitskiyamarylhurstjohnkosugekotohiradomainshinjukumanokotour" + + "akouhokutamakis-a-techietis-a-photographerokuappharmacyshimonita" + + "yanagithubusercontentrentino-stirolkounosupplieshinkamigotoyohas" + + "himotottoris-a-therapistoiakouyamashikekouzushimashikis-an-accou" + + "ntantshimonosekikawakozagawakozakis-an-actorkozowinbarrel-of-kno" + + "wledgeologyonagoyaustrheimatunduhrennesoyolasitebizenakasatsunai" + + "rportland-4-salernoboribetsucks3-eu-central-1kpnkppspdnshinshino" + + "tsurgerykrasnodarkredstonekristiansandcatshinshirokristiansundkr" + + "odsheradkrokstadelvaldaostarnbergkrymincommbankhmelnytskyivaokum" + + "atorinokumejimasoykumenantokonamegatakatoris-an-actresshimosuwal" + + "kis-a-playerkunisakis-an-anarchistoricalsocietykunitachiarailway" + + "kunitomigusukumamotoyamashikokuchuokunneppugliakunstsammlungkuns" + + "tunddesignkuokgrouphoenixn--30rr7ykurehabmerkurgankurobelaudible" + + "borkangerkurogiminamiashigarakuroisoftwarendalenugkuromatsunais-" + + "an-artisteinkjerusalembroiderykurotakikawasakis-an-engineeringku" + + "shirogawakustanais-an-entertainerkusupplykutchanelkutnokuzumakis" + + "-bykvafjordkvalsundkvamfamberkeleykvanangenkvinesdalkvinnheradkv" + + "iteseidskogkvitsoykwpspiegelkzmitoyoakemiuramiyazumiyotamanomjon" + + "dalenmlbfanmonstermontrealestatefarmequipmentrentinoa-adigemonza" + + "-brianzaporizhzhiamonza-e-della-brianzapposhintomikasaharamonzab" + + "rianzaptokyotangotsukitahatakamatsukawamonzaebrianzaramonzaedell" + + "abrianzamoonscalemoparachutingmordoviamoriyamatsumotofukemoriyos" + + 
"himinamiawajikis-into-animeiwamarshallstatebankfhappoumormonmout" + + "hagakhanamigawamoroyamatsunomortgagemoscowindmillmoseushistorymo" + + "sjoenmoskeneshinyoshitomiokamogawamosshiojirishirifujiedamosvikn" + + "x-serveronamsskoganeis-a-rockstarachowicemoteginowaniihamatamaka" + + "wajimansionshioyanaizumoviemovimientolgamovistargardmtpchoyodoba" + + "shichikashukujitawaramtranbymuenstermuginozawaonsenmuikamisunaga" + + "wamukodairamulhouserveblogspotrentinoaadigemunakatanemuncienciam" + + "uosattemuphonefosshirahamatonbetsurnadalmurmanskolobrzegersundmu" + + "rotorcraftrentinoalto-adigemusashimurayamatsusakahoginankokubunj" + + "is-into-carshimotsukemusashinoharamuseetrentinoaltoadigemuseumve" + + "renigingmusicarbonia-iglesias-carboniaiglesiascarboniamutsuzawam" + + "y-vigorlicemy-wanggouvicenzamyactivedirectorymyasustor-elvdalmyc" + + "dn77-securecifedexhibitionmyddnskingmydissentrentinos-tirolmydro" + + "boehringerikemydshirakofuefukihaborokunohealthcareershiranukanag" + + "awamyeffectrentinostirolmyfirewallonieruchomoscienceandindustryn" + + "myfritzmyftpaccesshiraois-into-cartoonshimotsumamyhome-serversai" + + "lleshiraokananiimihoboleslawiechristiansburgrondarmykolaivaporcl" + + "oudmymailermymediapchristmasakinderoymyokohamamatsudamypephotogr" + + "aphysiomypetshiratakahagitlabormyphotoshibalestrandabergamoareke" + + "ymachinewhampshirebungoonombresciamypsxn--32vp30hagebostadmysecu" + + "ritycamerakermyshopblockshishikuis-into-gamessinazawamytis-a-boo" + + "kkeeperugiamytuleapiagetmyipictetrentinosud-tirolmyvnchromedicin" + + "akamagayachtsantabarbaramywireitrentinosudtirolpinkomaganepionee" + + "rpippulawypiszpittsburghofauskedsmokorsetagayasells-for-unzenpiw" + + "atepixolinopizzapkomakiyosunndalplanetariuminnesotaketakatsukis-" + + "certifieducatorahimeshimamateramobilyplantationplantshitaramapla" + + "tformshangrilanshizukuishimofusaitamatsukuris-lostre-toteneis-a-" + + "republicancerresearchaeologicaliforniaplaystationplazaplchungnam" + + "dalseidfjordyndns-mailucaniaplumbingoplurinacionalpmnpodzonepohl" + + "poivronpokerpokrovskomatsushimasfjordenpoliticarrierpolitiendapo" + + "lkowicepoltavalle-aostarostwodzislawindowshizuokanazawapomorzesz" + + "owinnershoujis-not-certifiedunetbankhakassiapordenonepornporsang" + + "erporsanguidell-ogliastraderporsgrunnanyokoshibahikariwanumatake" + + "tomisatoshimapoznanpraxis-a-bruinsfanprdpreservationpresidioprgm" + + "rprimeldalprincipeprivatizehealthinsuranceprochowiceproductionsh" + + "owaprofesionalprogressivegaskvolloabathsbchurchaseljeepsongdalen" + + "viknaharimalopolskanlandyndns-office-on-the-webcampinashikiminoh" + + "kurapromombetsurfbsbxn--12co0c3b4evalleaostaticsavonarusawaprope" + + "rtyprotectionprotonetrentinosued-tirolprudentialpruszkowioshowti" + + "memergencyahabahcavuotnagarahkkeravjuegoshikikonaikawachinaganoh" + + "aramcoachampionshiphoptobishimagentositecnologiaprzeworskogptplu" + + "sgardenpupictureshisognepvhaibarakitahiroshimaoris-a-lawyerpvtre" + + "ntinosuedtirolpwciprianiigataishinomakindlegnicafederationpzqldq" + + "ponqslgbtrentoyonezawaquicksyteshriramlidlugolekafjordquipelemen" + + "tsienarutomobellevuelosangelesjabbottrevisohughesigdalqvcirclego" + + "doesntexisteingeekashiharasrtroandinosaurepaircraftrogstadsrvare" + + "servecounterstrikestoragestordalstoregontrailroadstorfjordstorjd" + + "evcloudfrontdoorstpetersburgstreamsterdamnserverbaniastudiostudy" + + "ndns-at-homedepotenzamamidsundstuff-4-salestufftoread-booksnesir" + + "dalstuttgartromsakakinokiasusakis-savedsusonosuzakaniepcesuzukan" + + 
"makiwiensuzukis-slickharkivalleeaosteigensvalbardunloppacificirc" + + "ustomersveiosvelvikomvuxn--2scrj9choshibuyachiyodavvenjargaulard" + + "alowiczest-le-patronsvizzerasvn-reposjcbnlswedenswidnicartoonart" + + "decologiaswiebodzindianapolis-a-bloggerswiftcoverswinoujsciencea" + + "ndhistoryswisshikis-uberleetrentino-sud-tirolsynology-dslingtush" + + "uissier-justicetuvalle-daostatic-accessnoasaitotaltuxfamilytwmai" + + "lvenneslaskerrylogisticsokaneyamazoevestfoldvestnesokndalvestre-" + + "slidrepbodynathomebuiltrusteevestre-totennishiawakuravestvagoyve" + + "velstadvibo-valentiavibovalentiavideovillasnesoddenmarkhangelskj" + + "akdnepropetrovskiervaapsteiermarkongsvingervinnicasacamdvrcampin" + + "agrandebugattipschlesischesolarssonvinnytsiavipsinaappiemontevir" + + "giniavirtualvirtueeldomeindianmarketingvirtuelvisakegawaviterbok" + + "nowsitallvivoldavixn--3bst00misakis-foundationvlaanderenvladikav" + + "kazimierz-dolnyvladimirvlogoipilotshisuifuelblagdenesnaaseraling" + + "enkainanaejrietisalatinabenonichryslervolkswagentsolognevologdan" + + "skoninjambylvolvolkenkundenvolyngdalvossevangenvotevotingvotoyon" + + "owiwatsukiyonoticiaskimitsubatamibudejjuedischesapeakebayernrtrv" + + "arggatromsojamisonwloclawekonsulatrobeepilepsydneywmflabsolundbe" + + "ckommuneworldworse-thandawowitdkonskowolayangrouphilipsynology-d" + + "iskstationwpdevcloudwritesthisblogsytewroclawithgoogleapisa-hock" + + "eynutsiracusakatakinouewtcmisasaguris-gonewtfbx-ostrowwlkpmgunma" + + "nxn--1ck2e1barclaycards3-fips-us-gov-west-1wuozuwwwithyoutubenev" + + "entoeidsvollwzmiuwajimaxn--42c2d9axn--45br5cylxn--45brj9citadeli" + + "veryxn--45q11citichernigovernmentoyotaris-a-cubicle-slavellinota" + + "irestaurantoyotomiyazakis-a-democratoyotsukaidoxn--4gbriminingxn" + + "--4it168dxn--4it797kooris-a-soxfanxn--4pvxs4allxn--54b7fta0ccivi" + + "laviationxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49civilisationx" + + "n--5rtq34kopervikhersonxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--" + + "6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264civilizationxn--80adxh" + + "ksolutionsilkomforbargainstitutelemarkarateu-1xn--80ao21axn--80a" + + "qecdr1axn--80asehdbarsyonlinewhollandiscoveryonaguniversityoriik" + + "aratsuginamikatagamilitaryoshiokaracoldwarmiastageu-2xn--80aswgx" + + "n--80audnedalnxn--8ltr62koryokamikawanehonbetsurutaharaxn--8pvr4" + + "uxn--8y0a063axn--90a3academiamicaaarborteaches-yogasawaracingxn-" + + "-90aeroportalaheadjudaicable-modemocraciaxn--90aishobarakawagoex" + + "n--90azhytomyrxn--9dbhblg6dietcimdbashkiriauthordalandeportenrig" + + "htathomeftpalmaseratibigawastronomy-gatewayokosukanzakiyosatokig" + + "awagrocerybnikahokutobamagazineat-url-o-g-i-natuurwetenschappena" + + "umburgjerdrumeteorappalermomahachijolstereportarumizusawaetnagah" + + "amaroygardendoftheinternetflixilovecollegefantasyleaguernseybolt" + + "arnobrzegyptianaturhistorisches3-ap-northeast-2ixboxenapponazure" + + "-mobile12hpaleobirabogadocscbgdyniabruzzoologicalvinklein-addram" + + "menuernberggfarmerseine164xn--9dbq2axn--9et52uxn--9krt00axn--and" + + "y-iraxn--aroport-byandexn--3ds443gxn--asky-iraxn--aurskog-hland-" + + "jnbasilicataniautomotiveconomiasakuchinotsuchiurakawalmartataran" + + "toyakokonoehimejibmdgcahcesuolocalhostrodawaraumalborkdalaziocea" + + "nographics3-eu-west-1xn--avery-yuasakuhokkaidoomdnsiskinkyotobet" + + "sumidatlanticivilwarmanagementoyouraxn--b-5gaxn--b4w605ferdxn--b" + + "ck1b9a5dre4claimsantacruzsantafedjejuifminamiizukamishihoronobea" + + "uxartsandcraftsantamariakexn--bdddj-mrabdxn--bearalvhki-y4axn--b" + + 
"erlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachika" + + "tsuuraxn--bievt-0qa2xn--bjarky-fyaotsurreyxn--bjddar-ptamayufuet" + + "tertdasnetzxn--blt-elabourxn--bmlo-graingerxn--bod-2natalxn--brn" + + "ny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigat" + + "ion-aptibleaseating-organicbcn-north-1xn--brum-voagatrysiljanxn-" + + "-btsfjord-9zaxn--c1avgxn--c2br7gxn--c3s14misawaxn--cck2b3basketb" + + "allyngenhktatsunoddautoscanadaejeonbukarasjohkamikoaniikappueblo" + + "ckbustermezgoraugustowadaegubambleclerc66xn--cg4bkis-very-badajo" + + "zxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-sslattumisconfusedxn--comuni" + + "caes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--czr694batodayu" + + "kindustriaveroykeniwaizumiotsukumiyamazonawsadodgemologicallilly" + + "ombolzanord-frontiereviewskrakowebhostingjerstadotsuruokakegawau" + + "kraanghkepnogifts3-ap-southeast-2xn--czrs0tulanxesslupskommunalf" + + "orbundxn--czru2dxn--czrw28batsfjordishakotanhlfanhs3-us-gov-west" + + "-1xn--d1acj3bauhausposts-and-telecommunicationsncfdisrechtranaka" + + "muratajimidoriopretogoldpoint2thisamitsukeu-3xn--d1alfaromeoxn--" + + "d1atuneslzxn--d5qv7z876clanbibaidarmeniaxn--davvenjrga-y4axn--dj" + + "rs72d6uyxn--djty4kosaigawaxn--dnna-grajewolterskluwerxn--drbak-w" + + "uaxn--dyry-iraxn--e1a4cldmailuccapetownnews-stagingrongaxn--eckv" + + "dtc9dxn--efvn9somaxn--efvy88hair-surveillancexn--ehqz56nxn--elqq" + + "16hakatanortonxn--estv75gxn--eveni-0qa01gaxn--f6qx53axn--fct429k" + + "osakaerodromegallupinbarreauctionflfanfshostrowiecaseihichisobet" + + "suldalimoliserniaustraliaisondriobranconagawalesundemoneyokozebi" + + "nordreisa-geekaragandamusementashkentatamotors3-ap-southeast-1pa" + + "sswordd-dnshome-webservercellikes-piedmonticellocus-4xn--fhbeiar" + + "nxn--finny-yuaxn--fiq228c5hsomnarviikamitondabayashiogamagorizia" + + "xn--fiq64bbcasertairavennagatorockartuzyukuhashimoichinosekigaha" + + "ravocatanzarowebredirectmetacentrumetlifeinsurancempresashibetsu" + + "kuiitatebayashiibajddarchitecturealtydalipayomitanoceanographiqu" + + "emrevistanbulminamidaitomandalimanowarudaurskog-holandroverhalla" + + "-speziajudygarlanddnss3-ap-south-1kappchizippodhaleangaviikadena" + + "amesjevuemielno-ip6xn--fiqs8sooxn--fiqz9sopotritonxn--fjord-lrax" + + "n--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--fpcrj9c3dxn--frde" + + "-grandrapidsor-odalxn--frna-woaraisaijosoyrorosor-varangerxn--fr" + + "ya-hraxn--fzc2c9e2clickashiwaraxn--fzys8d69uvgmailxn--g2xx48clin" + + "ichernihivguccieszynissandnessjoenissayokkaichiropracticheltenha" + + "m-radio-opencraftrainingripescaravantaaxn--gckr3f0fbxosaxoxn--ge" + + "crj9cliniquenoharaxn--ggaviika-8ya47hakodatexn--gildeskl-g0axn--" + + "givuotna-8yasakaiminatoyookannamilanotteroyxn--gjvik-wuaxn--gk3a" + + "t1exn--gls-elacaixaxn--gmq050is-very-evillagexn--gmqw5axn--h-2fa" + + "ilxn--h1aeghakonexn--h2breg3evenesorfoldxn--h2brj9c8clintonoshoe" + + "santoandreamhostersanukis-a-designerimarnardalucernexn--h3cuzk1d" + + "igitalxn--hbmer-xqaxn--hcesuolo-7ya35bbtattoolsztynsettlers3-us-" + + "west-1xn--hery-iraxn--hgebostad-g3axn--hmmrfeasta-s4accident-pre" + + "vention-webhopenairbusantiquest-a-la-maisondre-landroidvagsoyeri" + + "cssonyoursidealerimo-i-ranadexeterxn--hnefoss-q1axn--hobl-iraxn-" + + "-holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-" + + "54axn--i1b6b1a6a2exn--imr513nxn--indery-fyasugivingxn--io0a7is-v" + + "ery-goodhandsonxn--j1aefedorapeopleikangerxn--j1amhakubahccavuot" + + "nagareyamakeupowiathletajimabaridagawalbrzycharternidxn--j6w193g" + + 
"xn--jlq61u9w7bbvacationswatch-and-clockerhcloudns3-us-west-2xn--" + + "jlster-byasuokanraxn--jrpeland-54axn--jvr189mishimasudaxn--k7yn9" + + "5exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-w" + + "oaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--3e0b707exn--koluok" + + "ta-7ya57hakuis-a-liberalxn--kprw13dxn--kpry57dxn--kpu716fedorapr" + + "ojectransportexn--kput3is-very-nicexn--krager-gyatomitamamuraxn-" + + "-kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49j" + + "dfastlylbarcelonagasakikuchikuseikarugamvikarasjokarasuyamarugam" + + "e-hostrolekamiminers3-external-1xn--ksnes-uuaxn--kvfjord-nxaxn--" + + "kvitsy-fyatsukanumazuryxn--kvnangen-k0axn--l-1fairwindsorocabals" + + "fjordxn--l1accentureklamborghinikis-very-sweetpepperxn--laheadju" + + "-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagavi" + + "ika-52bentleyurihonjournalistgoryusuharavoues3-eu-west-2xn--lesu" + + "nd-huaxn--lgbbat1ad8jelenia-goraxn--lgrd-poacctunkongsbergxn--lh" + + "ppi-xqaxn--linds-pramericanarturystykanoyaltakasakiyokawaraxn--l" + + "ns-qlapyatigorskoseis-a-studentalxn--loabt-0qaxn--lrdal-sraxn--l" + + "renskog-54axn--lt-liaclothingdustkagoshimalselvendrellukowhaling" + + "rossetouchijiwadegreexn--lten-granexn--lury-iraxn--m3ch0j3axn--m" + + "ely-iraxn--merker-kuaxn--mgb2ddesorreisahayakawakamiichikawamisa" + + "toursimple-urlxn--mgb9awbfeiraquarellebesbyglandynulvikasuyanaga" + + "waxn--mgba3a3ejtuscanyxn--mgba3a4f16axn--mgba3a4franamizuholding" + + "smilevangerxn--mgba7c0bbn0axn--mgbaakc7dvfermochizukirkenesbscho" + + "koladenxn--mgbaam7a8hakusandiegooglecodespotrentino-alto-adigexn" + + "--mgbab2bdxn--mgbai9a5eva00beppublishproxyzjampagefrontappalmspr" + + "ingsakerxn--mgbai9azgqp6jeonnamerikawauexn--mgbayh7gpalacexn--mg" + + "bb9fbpobanazawaxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mg" + + "berp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--" + + "mgbpl2fhskoleirfjordxn--mgbqly7c0a67fbcngroundhandlingroznyxn--m" + + "gbqly7cvafranziskanerdpolicexn--mgbt3dhdxn--mgbtf8flatangerxn--m" + + "gbtx2beskidyn-o-saurlandes3-website-ap-northeast-1xn--mgbx4cd0ab" + + "bvieeexn--mix082ferraraxn--mix891ferrarittoguraxn--mjndalen-64ax" + + "n--mk0axindigenaklodzkochikushinonsenergyxn--mk1bu44cnsaobernard" + + "ownloadyndns-picsaogoncartierxn--mkru45is-with-thebandovre-eiker" + + "xn--mlatvuopmi-s4axn--mli-tlaquilanciaxn--mlselv-iuaxn--moreke-j" + + "uaxn--mori-qsakuragawaxn--mosjen-eyawaraxn--mot-tlarvikosherbroo" + + "kegawaxn--mre-og-romsdal-qqbestbuyshouses3-website-ap-southeast-" + + "1xn--msy-ula0haldenxn--mtta-vrjjat-k7afamilycompanycntoystre-sli" + + "drettozawaxn--muost-0qaxn--mxtq1missilezajsklabudhabikinokawabar" + + "thaebaruminamiuonumassa-carrara-massacarraramassabusinessebykleg" + + "allocalhistoryggeelvinckaufenxn--ngbc5azdxn--ngbe9e0axn--ngbrxn-" + + "-3hcrj9cistrondheimmobilienxn--nit225koshimizumakizunokunimimata" + + "kasugais-a-teacherkassymantechnologyxn--nmesjevuemie-tcbaltimore" + + "-og-romsdalpha-myqnapcloudaccesscambridgestoneuesortlandxn--nnx3" + + "88axn--nodessakuraisleofmanchesterxn--nqv7fs00emaxn--nry-yla5gxn" + + "--ntso0iqx3axn--ntsq17gxn--nttery-byaeserveexchangexn--nvuotna-h" + + "waxn--nyqy26axn--o1achattanooganordkappimientakazakis-leetnedalx" + + "n--o3cw4halsaintlouis-a-anarchistoireggiocalabriaxn--o3cyx2axn--" + + "od0algxn--od0aq3betainaboxfusejnynysagaeroclubmedecincinnationwi" + + "dealstahaugesunderseaportsinfolldalabamagasakishimabaraogakibich" + + "uomutashinaindustriesteambulanceu-4xn--ogbpf8flekkefjordxn--oppe" + + 
"grd-ixaxn--ostery-fyawatahamaxn--osyro-wuaxn--p1acferreroticampo" + + "bassociatestinguovdageaidnuslivinghistoryxn--p1aissmarterthanyou" + + "xn--pbt977coguchikuzenxn--pgbs0dhlxn--porsgu-sta26fetsundynv6xn-" + + "-pssu33lxn--pssy2uxn--q9jyb4collectionxn--qcka1pmckinseyxn--qqqt" + + "11misugitokuyamatsumaebashikshacknetrentino-suedtirolxn--qxamune" + + "ustarhubsoruminternationalfirearmshintokushimaxn--rady-iraxn--rd" + + "al-poaxn--rde-ulavagiskexn--rdy-0nabariwchonanbuildingroks-thisa" + + "yamanobeokakudamatsuexn--rennesy-v1axn--rhkkervju-01aflakstadaok" + + "agakicks-assedicolognextdirectozsdeloittemp-dnsaotomelhusdecorat" + + "iveartsapodlasiellaktyubinskiptveterinairealtorlandyndns-remotew" + + "dyndns-serverdaluroyxn--rholt-mragowoodsideltaitogliattiresouthc" + + "arolinarvikomonoxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa" + + "-5nativeamericanantiquesouthwestfalenxn--risr-iraxn--rland-uuaxn" + + "--rlingen-mxaxn--rmskog-byaxn--rny31hammarfeastafricapebretonami" + + "crosoftbankautokeinowruzhgorodeoxn--rovu88bhzcasinorddalindaskoy" + + "abearalvahkijobserverisignieznogataijinfinitintuitaxihuanikkoebe" + + "nhavnikolaevents3-website-ap-southeast-2xn--rros-granvindafjordx" + + "n--rskog-uuaxn--rst-0naturalhistorymuseumcenterxn--rsta-francais" + + "eharaxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithruhere" + + "dumbrellajollamericanexpressexyxn--s9brj9colonialwilliamsburgrpa" + + "rocherkasyno-dsapporoxn--sandnessjen-ogbizxn--sandy-yuaxn--seral" + + "-lraxn--ses554gxn--sgne-gratangenxn--skierv-utazassnasabaerobati" + + "cketsowaxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxax" + + "n--slat-5naturalsciencesnaturellespjelkavikomorotsukamiokamikita" + + "yamatsuris-a-socialistcgrouphdxn--slt-elabcgxn--smla-hraxn--smna" + + "-gratis-a-bulls-fanxn--snase-nraxn--sndre-land-0cbremangerxn--sn" + + "es-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1" + + "axn--sr-varanger-ggbieigersundivtasvuodnakaniikawatanaguraxauste" + + "vollavangenaval-d-aosta-valleyokotebinagisoccertificationavigati" + + "onavoibestadds3-ca-central-1xn--srfold-byaxn--srreisa-q1axn--sru" + + "m-grazxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbielawal" + + "terxn--stre-toten-zcbspreadbettingxn--t60b56axn--tckweatherchann" + + "elxn--tiq49xqyjetztrentino-sudtirolxn--tjme-hraxn--tn0agrinet-fr" + + "eakspydebergxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trgstad-r" + + "1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvaroyxn--uc0ay4" + + "axn--uist22hamurakamigoris-a-libertarianxn--uisz3gxn--unjrga-rta" + + "obaomoriguchiharagusartsrlxn--unup4yxn--uuwu58axn--vads-jraxn--v" + + "ard-jraxn--vegrshei-c0axn--vermgensberater-ctbiellaakesvuemielec" + + "ceverbankareliancevje-og-hornnes3-website-eu-west-1xn--vermgensb" + + "eratung-pwbieszczadygeyachimataikikugawarszawashingtondclkariyam" + + "elbournexn--vestvgy-ixa6oxn--vg-yiabkhaziaxn--vgan-qoaxn--vgsy-q" + + "oa0jevnakershuscultureggioemiliaromagnamsosnowiechoseiroumuenche" + + "nxn--vgu402coloradoplateaudioxn--vhquvbarrell-of-knowledgeometre" + + "-experts-comptables3-us-east-2xn--vler-qoaxn--vre-eiker-k8axn--v" + + "rggt-xqadxn--vry-yla5gxn--vuq861bievatmallorcadaques3-website-sa" + + "-east-1xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1columb" + + "usheyxn--wgbl6axn--xhq521bifukagawashtenawdev-myqnapcloudapplebt" + + "imnetzlgjovikarlsoyusuisserveftpanamatta-varjjatjeldsundivttasvu" + + "otnakanojohanamakinoharaxn--xkc2al3hye2axn--xkc2dl3a5ee0hangglid" + + "ingxn--y9a3aquariumitourismolangevagrarchaeologyeongbukmpspbaref" + + 
"ootballfinanzgorzeleccoffeedbackplaneapplinziiyamanouchikuhokury" + + "ugasakitchenayorovigovtateshinanomachimkentateyamaustinnavuotnar" + + "ashinobninsk12xn--yer-znaturbruksgymnxn--yfro4i67oxn--ygarden-p1" + + "axn--ygbi2ammxn--3oq18vl8pn36axn--ystre-slidre-ujbihorologyuucon" + + "nectjmaxxxfinityuzawaxn--zbx025dxn--zf0ao64axn--zf0avxn--3pxu8ko" + + "nyvelolxn--zfr164bikedagestangeorgeorgiaxperiaxz" + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. +// +// The layout within the uint32, from MSB to LSB, is: +// [ 0 bits] unused +// [10 bits] children index +// [ 1 bits] ICANN bit +// [15 bits] text index +// [ 6 bits] text length +var nodes = [...]uint32{ + 0x31a803, + 0x284d84, + 0x382f06, + 0x2f37c3, + 0x2f37c6, + 0x37af86, + 0x3a7a03, + 0x31b604, + 0x322487, + 0x382b48, + 0x1a00742, + 0x32e147, + 0x3672c9, + 0x2b4eca, + 0x2b4ecb, + 0x232183, + 0x2ab9c6, + 0x238485, + 0x1e01482, + 0x203b44, + 0x260543, + 0x201485, + 0x2215842, + 0x332603, + 0x271b0c4, + 0x31fe05, + 0x2a00102, + 0x38194e, + 0x256483, + 0x39cbc6, + 0x2e03d02, + 0x2c8047, + 0x23e146, + 0x3205c42, + 0x257dc3, + 0x257dc4, + 0x357406, + 0x205d08, + 0x277146, + 0x302004, + 0x3600602, + 0x33acc9, + 0x211307, + 0x347986, + 0x3c1109, + 0x2c78c8, + 0x331004, + 0x241286, + 0x230106, + 0x3a00582, + 0x3a234f, + 0x21f4ce, + 0x226484, + 0x2c1545, + 0x31a705, + 0x2f6809, + 0x244689, + 0x357c07, + 0x22bbc6, + 0x206dc3, + 0x3e03942, + 0x21d6c3, + 0x220d4a, + 0x21fbc3, + 0x3bde45, + 0x2f2542, + 0x370749, + 0x4200282, + 0x216c84, + 0x2ef006, + 0x2bb6c5, + 0x2d7c04, + 0x4a14344, + 0x205583, + 0x2374c4, + 0x4e02b82, + 0x267184, + 0x527eac4, + 0x39004a, + 0x5600cc2, + 0x35c447, + 0x2774c8, + 0x6207ec2, + 0x340687, + 0x2bde44, + 0x2bde47, + 0x3b9605, + 0x339407, + 0x31ca86, + 0x325384, + 0x3314c5, + 0x298307, + 0x720fc02, + 0x335a43, + 0x21ab82, + 0x3aae43, + 0x7612442, + 0x27f485, + 0x7a023c2, + 0x293584, + 0x276005, + 0x2263c7, + 0x20974e, + 0x2391c4, + 0x238cc4, + 0x20b583, + 0x364209, + 0x30e2cb, + 0x259e48, + 0x3c0ec8, + 0x316488, + 0x215cc8, + 0x330e4a, + 0x339307, + 0x309d86, + 0x7e6e442, + 0x345243, + 0x355943, + 0x35d344, + 0x3a7a43, + 0x32f6c3, + 0x172a782, + 0x8203102, + 0x27b385, + 0x28df86, + 0x2a9f04, + 0x369187, + 0x23ce86, + 0x3806c4, + 0x3806c7, + 0x205a83, + 0x86c31c2, + 0x8b14902, + 0x8e21182, + 0x221186, + 0x9200882, + 0x286c45, + 0x32bcc3, + 0x3c6444, + 0x2e3804, + 0x2e3805, + 0x2053c3, + 0x96b6c03, + 0x9a09342, + 0x289b05, + 0x289b0b, + 0x20bd06, + 0x331f4b, + 0x22aa44, + 0x20cec9, + 0x20d784, + 0x9e0d9c2, + 0x20ef03, + 0x20fec3, + 0x1610702, + 0x2fb9c3, + 0x21070a, + 0xa200302, + 0x203dc5, + 0x2d400a, + 0x243384, + 0x210f03, + 0x212984, + 0x213b83, + 0x213b84, + 0x213b87, + 0x2153c5, + 0x215705, + 0x216d46, + 0x2170c6, + 0x217d43, + 0x21a708, + 0x212d43, + 0xa6004c2, + 0x22c3c8, + 0x3878cb, + 0x223088, + 0x225f06, + 0x227447, + 0x22a1c8, + 0xb604002, + 0xbaf21c2, + 0x23b388, + 0x3031c7, + 0x207a45, + 0x207a48, + 0x383c48, + 0x2fa9c3, + 0x22f384, + 
0x35d382, + 0xbe2f582, + 0xc201bc2, + 0xca30502, + 0x230503, + 0xce03cc2, + 0x31b5c3, + 0x2f1b84, + 0x20bf83, + 0x335e04, + 0x322b8b, + 0x237c03, + 0x2db106, + 0x237c04, + 0x2e21ce, + 0x2669c5, + 0x33d7c8, + 0x251107, + 0x25110a, + 0x2342c3, + 0x34f747, + 0x30e485, + 0x2342c4, + 0x2d4b86, + 0x2d4b87, + 0x2d0204, + 0x37d587, + 0x209a84, + 0x340c44, + 0x340c46, + 0x25d944, + 0x39db46, + 0x207803, + 0x207808, + 0x21a988, + 0x238c83, + 0x2fb983, + 0x3a8c04, + 0x3ae4c3, + 0xd24d5c2, + 0xd6d2fc2, + 0x2083c3, + 0x205646, + 0x241383, + 0x354bc4, + 0xda4b182, + 0x24cb83, + 0x339c03, + 0x218882, + 0xde03c02, + 0x2c0b06, + 0x23c007, + 0x2eab45, + 0x38a504, + 0x2981c5, + 0x27e687, + 0x2d84c9, + 0x2dcd46, + 0x307788, + 0x2eaa46, + 0xe2010c2, + 0x2f1408, + 0x2f3e06, + 0x223a85, + 0x30fe07, + 0x310344, + 0x310345, + 0x2010c4, + 0x2010c8, + 0xe619382, + 0xea02642, + 0x3292c6, + 0x202648, + 0x34d485, + 0x34df06, + 0x350108, + 0x36d548, + 0xee1f8c5, + 0xf21d0c4, + 0x38ca87, + 0xf60d642, + 0xfaefa02, + 0x10e02c42, + 0x2ef105, + 0x373905, + 0x3c1546, + 0x3208c7, + 0x3973c7, + 0x1160be03, + 0x26f507, + 0x2b99c8, + 0x231a09, + 0x381b07, + 0x2321c7, + 0x232b08, + 0x233306, + 0x233dc6, + 0x234a0c, + 0x235e4a, + 0x2364c7, + 0x23834b, + 0x23be47, + 0x23be4e, + 0x1a23d104, + 0x23d744, + 0x23e847, + 0x2616c7, + 0x243806, + 0x243807, + 0x243c87, + 0x1a630a42, + 0x2449c6, + 0x2449ca, + 0x244f4b, + 0x246d07, + 0x2478c5, + 0x247c03, + 0x248146, + 0x248147, + 0x322643, + 0x1aa022c2, + 0x248a4a, + 0x1af68802, + 0x1b24d602, + 0x1b64afc2, + 0x1ba3e242, + 0x24cc85, + 0x24d2c4, + 0x1c204ac2, + 0x267205, + 0x245543, + 0x20d885, + 0x215bc4, + 0x20f984, + 0x209d86, + 0x2505c6, + 0x289d03, + 0x3b6d84, + 0x3ac2c3, + 0x1ca02e02, + 0x3582c4, + 0x3582c6, + 0x38d005, + 0x36e3c6, + 0x30ff08, + 0x227b84, + 0x397848, + 0x399a45, + 0x311708, + 0x36c6c6, + 0x265847, + 0x27b984, + 0x27b986, + 0x26f803, + 0x3917c3, + 0x20b648, + 0x31c684, + 0x354fc7, + 0x2d2906, + 0x2d2909, + 0x20a1c8, + 0x317908, + 0x338884, + 0x2067c3, + 0x23dd42, + 0x1da4c3c2, + 0x1de14202, + 0x207583, + 0x1e20a502, + 0x3225c4, + 0x2440c6, + 0x335b45, + 0x283403, + 0x234ec4, + 0x2b1a07, + 0x336bc3, + 0x37cfc8, + 0x21ea85, + 0x25f7c3, + 0x275f85, + 0x2760c4, + 0x2f9c06, + 0x222704, + 0x225986, + 0x226306, + 0x357d84, + 0x23c203, + 0x1e614582, + 0x238ac5, + 0x2011c3, + 0x1ea05ec2, + 0x2319c3, + 0x21c8c5, + 0x237583, + 0x237589, + 0x1ee01f02, + 0x1f608ac2, + 0x289645, + 0x219286, + 0x37c8c6, + 0x2bfcc8, + 0x2bfccb, + 0x20568b, + 0x21c145, + 0x2ead45, + 0x2c3909, + 0x1603142, + 0x357f48, + 0x23e504, + 0x1fe01b02, + 0x20aac3, + 0x20661886, + 0x224fc8, + 0x20a003c2, + 0x307348, + 0x20e0a6c2, + 0x23994a, + 0x212c8d03, + 0x39f286, + 0x3b5048, + 0x389ac8, + 0x3ba046, + 0x377d47, + 0x3a2547, + 0x23fe0a, + 0x243404, + 0x352f84, + 0x366b89, + 0x21ba1d45, + 0x21f6c6, + 0x200143, + 0x255184, + 0x21e25784, + 0x323307, + 0x22f607, + 0x364044, + 0x2d3345, + 0x3c1608, + 0x37b847, + 0x38fc87, + 0x22208882, + 0x23b9c4, + 0x28e948, + 0x24e244, + 0x252944, + 0x253005, + 0x253147, + 0x22b509, + 0x254004, + 0x2547c9, + 0x254a08, + 0x254f04, + 0x254f07, + 0x226553c3, + 0x255547, + 0x1626d02, + 0x16ad402, + 0x255e86, + 0x2564c7, + 0x256b04, + 0x258487, + 0x258f47, + 0x259783, + 0x329982, + 0x205dc2, + 0x270003, + 0x270004, + 0x27000b, + 0x3c0fc8, + 0x25f184, + 0x25ad05, + 0x25cac7, + 0x25e5c5, + 0x30590a, + 0x25f0c3, + 0x22a12c42, + 0x212c44, + 0x261489, + 0x265183, + 0x265247, + 0x2f61c9, + 0x336308, + 0x25d1c3, + 0x27a247, + 0x27aa89, + 0x26be83, + 0x281b04, + 0x283c89, + 0x287dc6, + 0x2266c3, + 
0x2039c2, + 0x241243, + 0x2ad207, + 0x383fc5, + 0x340346, + 0x268984, + 0x2dba05, + 0x220d03, + 0x217f86, + 0x20d0c2, + 0x3a3984, + 0x22e2ab02, + 0x22ab03, + 0x23201802, + 0x252843, + 0x217544, + 0x217547, + 0x3c6746, + 0x255e42, + 0x23629942, + 0x384384, + 0x23a30b82, + 0x23e01a42, + 0x337304, + 0x337305, + 0x201a45, + 0x35ab46, + 0x24208742, + 0x208745, + 0x2100c5, + 0x210ac3, + 0x213d06, + 0x214885, + 0x221102, + 0x34db45, + 0x221104, + 0x227ac3, + 0x227d03, + 0x2460ad82, + 0x298507, + 0x33a504, + 0x33a509, + 0x255084, + 0x281903, + 0x35b109, + 0x281908, + 0x24b0cc04, + 0x30cc06, + 0x2a2c83, + 0x20cb03, + 0x30e843, + 0x24eefe82, + 0x375502, + 0x25201402, + 0x32d8c8, + 0x327088, + 0x3a8046, + 0x2544c5, + 0x34f5c5, + 0x31e0c7, + 0x229985, + 0x25cd82, + 0x25694cc2, + 0x1602202, + 0x240a88, + 0x34e285, + 0x27ca84, + 0x2e7205, + 0x241d87, + 0x25efc4, + 0x248942, + 0x25a2dac2, + 0x33e704, + 0x226ec7, + 0x289fc7, + 0x3393c4, + 0x291003, + 0x238bc4, + 0x238bc8, + 0x234106, + 0x2d4a0a, + 0x22b3c4, + 0x291508, + 0x288204, + 0x227546, + 0x294c84, + 0x2ef406, + 0x33a7c9, + 0x26d007, + 0x34e1c3, + 0x25eebfc2, + 0x331203, + 0x207c82, + 0x2625c982, + 0x30cf06, + 0x371e48, + 0x2a44c7, + 0x2f7209, + 0x290ac9, + 0x2a61c5, + 0x2a73c9, + 0x2a7b85, + 0x2a7cc9, + 0x2a9045, + 0x2aa008, + 0x266598c4, + 0x26a598c7, + 0x232583, + 0x2aa207, + 0x232586, + 0x2aa5c7, + 0x2a0f45, + 0x2ca8c3, + 0x26e35c02, + 0x2ea984, + 0x27230bc2, + 0x276552c2, + 0x2f3ac6, + 0x277445, + 0x2acac7, + 0x326403, + 0x32f644, + 0x2130c3, + 0x23b0c3, + 0x27a07d02, + 0x28206202, + 0x37b084, + 0x329943, + 0x24b905, + 0x28603882, + 0x28e00c42, + 0x2e0586, + 0x31c7c4, + 0x385444, + 0x38544a, + 0x29601342, + 0x38e2ca, + 0x39e948, + 0x29a6ff84, + 0x201fc3, + 0x208c43, + 0x3165c9, + 0x267709, + 0x2a6e06, + 0x29e14bc3, + 0x214bc5, + 0x39434d, + 0x39eb06, + 0x20e84b, + 0x2a200802, + 0x220b88, + 0x2ca1a802, + 0x2ce00942, + 0x2c9a85, + 0x2d205842, + 0x21b147, + 0x2b0747, + 0x214a43, + 0x348148, + 0x2d601102, + 0x29f384, + 0x291203, + 0x325545, + 0x395983, + 0x245646, + 0x223504, + 0x2fb943, + 0x2aec03, + 0x2da03202, + 0x2eacc4, + 0x3af385, + 0x2ace07, + 0x277e03, + 0x2ad9c3, + 0x2ae803, + 0x16ae8c2, + 0x2ae8c3, + 0x2aeb83, + 0x2de0b0c2, + 0x39e304, + 0x2507c6, + 0x22a443, + 0x2af343, + 0x2e2b0102, + 0x2b0108, + 0x2b03c4, + 0x2ee8c6, + 0x256947, + 0x3845c6, + 0x2a4f04, + 0x3be01ec2, + 0x23244b, + 0x2ff28e, + 0x219e0f, + 0x2c7b83, + 0x3c65fe82, + 0x1647302, + 0x3ca00a82, + 0x25b4c3, + 0x205983, + 0x2d8746, + 0x2f1946, + 0x3c2147, + 0x2f9084, + 0x3ce193c2, + 0x3d21edc2, + 0x2425c5, + 0x2e44c7, + 0x37fd86, + 0x3d64d542, + 0x30de04, + 0x2b7b43, + 0x3da09602, + 0x3df63443, + 0x2b8444, + 0x2bd289, + 0x16c2482, + 0x3e20dd82, + 0x327e05, + 0x3e6c2702, + 0x3ea00682, + 0x352307, + 0x214fc9, + 0x36754b, + 0x3a2305, + 0x26ad09, + 0x37e806, + 0x20bd47, + 0x3ee074c4, + 0x348c89, + 0x337b07, + 0x224c87, + 0x230803, + 0x2afc46, + 0x30a7c7, + 0x20fbc3, + 0x2f0d46, + 0x3f6038c2, + 0x3fa0e402, + 0x3bec83, + 0x32f245, + 0x332807, + 0x222386, + 0x383f45, + 0x2f3f04, + 0x278f45, + 0x2f2144, + 0x3fe00f82, + 0x341587, + 0x2f2984, + 0x26a444, + 0x34694d, + 0x26a449, + 0x230b08, + 0x25c404, + 0x335ec5, + 0x20a047, + 0x341144, + 0x23cf47, + 0x204cc5, + 0x402a4e44, + 0x30bcc5, + 0x263e44, + 0x390706, + 0x3206c5, + 0x406291c2, + 0x210fc4, + 0x210fc5, + 0x35d8c6, + 0x343b85, + 0x25d144, + 0x3c6103, + 0x20eb46, + 0x22b705, + 0x22f045, + 0x3207c4, + 0x22b443, + 0x22b44c, + 0x40aacf02, + 0x40e0a5c2, + 0x41201542, + 0x20f003, + 0x20f004, + 0x41604482, + 0x30ae88, + 0x340405, + 
0x236184, + 0x243686, + 0x41a0e302, + 0x41e1de42, + 0x422000c2, + 0x2b2cc5, + 0x294346, + 0x229304, + 0x357946, + 0x35c206, + 0x222a83, + 0x4272850a, + 0x26b085, + 0x28b003, + 0x228606, + 0x304789, + 0x228607, + 0x292288, + 0x2c7789, + 0x31d348, + 0x250e46, + 0x209703, + 0x42a6f582, + 0x392c08, + 0x42e54ac2, + 0x43201e42, + 0x20be83, + 0x2d8345, + 0x26ba04, + 0x3b6fc9, + 0x2ee004, + 0x21b388, + 0x20dc03, + 0x323004, + 0x2a5fc3, + 0x2192c8, + 0x346887, + 0x43a25242, + 0x290ec2, + 0x31a685, + 0x39cf89, + 0x21f743, + 0x27bfc4, + 0x394304, + 0x20a0c3, + 0x27d04a, + 0x43f7c0c2, + 0x44210f82, + 0x2c3143, + 0x37ea83, + 0x1600082, + 0x200083, + 0x44603282, + 0x44a05a02, + 0x44e1a484, + 0x322046, + 0x2e07c6, + 0x245e44, + 0x277043, + 0x345c03, + 0x2ec1c3, + 0x2452c6, + 0x341d05, + 0x2c32c7, + 0x2c6445, + 0x2c7d86, + 0x2c8708, + 0x2c8906, + 0x24efc4, + 0x29960b, + 0x2cb583, + 0x2cb585, + 0x2cba08, + 0x21a202, + 0x352602, + 0x4524cd02, + 0x4560d682, + 0x219403, + 0x45a6cd82, + 0x26cd83, + 0x2cbd04, + 0x2cc543, + 0x462168c2, + 0x466d0e06, + 0x25e446, + 0x46ad0f42, + 0x46e0ff02, + 0x47227d42, + 0x4763a3c2, + 0x47a1b5c2, + 0x47e047c2, + 0x20dec3, + 0x358645, + 0x2b6306, + 0x48226444, + 0x38ce0a, + 0x3a0546, + 0x21c384, + 0x277943, + 0x48e02f02, + 0x2032c2, + 0x26fb43, + 0x4920ec83, + 0x2e6747, + 0x3205c7, + 0x4aa70107, + 0x393f87, + 0x22cd03, + 0x3176ca, + 0x251304, + 0x397504, + 0x39750a, + 0x247705, + 0x4ae1f682, + 0x258443, + 0x4b202002, + 0x228803, + 0x3311c3, + 0x4ba02742, + 0x26f484, + 0x220704, + 0x2046c5, + 0x3080c5, + 0x34e4c6, + 0x34e846, + 0x4be53982, + 0x4c201382, + 0x2f8545, + 0x25e152, + 0x33f206, + 0x25e8c3, + 0x39d486, + 0x2a1f45, + 0x1604842, + 0x54610c82, + 0x35e3c3, + 0x210c83, + 0x27e483, + 0x54a0c502, + 0x381c43, + 0x54e06e02, + 0x200843, + 0x39e348, + 0x285603, + 0x2a6046, + 0x23ecc7, + 0x30b986, + 0x30b98b, + 0x21c2c7, + 0x2ea784, + 0x55601c82, + 0x340285, + 0x55a09cc3, + 0x292c83, + 0x239b45, + 0x3175c3, + 0x55f175c6, + 0x3580ca, + 0x245ac3, + 0x23e004, + 0x202586, + 0x223e86, + 0x56241d03, + 0x32f507, + 0x2a6d07, + 0x29ae85, + 0x311986, + 0x22b743, + 0x58e13f43, + 0x59206f02, + 0x21a244, + 0x207609, + 0x240887, + 0x229a85, + 0x247d04, + 0x26c7c8, + 0x273b85, + 0x59676405, + 0x284e49, + 0x347a43, + 0x24d584, + 0x59a0b182, + 0x219603, + 0x59e94742, + 0x299986, + 0x162bac2, + 0x5a2a47c2, + 0x2b2bc8, + 0x3a76c3, + 0x30bc07, + 0x2ce245, + 0x2b2785, + 0x2d8e4b, + 0x2d9846, + 0x2d9046, + 0x2dc486, + 0x279f04, + 0x2dc6c6, + 0x5a6f0248, + 0x237cc3, + 0x201f83, + 0x201f84, + 0x2ddbc4, + 0x2dde87, + 0x2df2c5, + 0x5aadf402, + 0x5ae08302, + 0x208305, + 0x2bb184, + 0x2e298b, + 0x2e3708, + 0x298804, + 0x230982, + 0x5b64e882, + 0x24e883, + 0x2e3f04, + 0x2e41c5, + 0x2e4d07, + 0x2e6d44, + 0x21c184, + 0x5ba057c2, + 0x36b449, + 0x2e84c5, + 0x3a25c5, + 0x2e9045, + 0x5be19543, + 0x2e9d04, + 0x2e9d0b, + 0x2ea0c4, + 0x2ea38b, + 0x2ec105, + 0x219f4a, + 0x2ecec8, + 0x2ed0ca, + 0x2ed983, + 0x2ed98a, + 0x5c21fc42, + 0x5c648602, + 0x209943, + 0x5caf1382, + 0x2f1383, + 0x5cf6c182, + 0x5d32c442, + 0x2f1fc4, + 0x21a846, + 0x357685, + 0x2f3d83, + 0x31adc6, + 0x34f085, + 0x250ac4, + 0x5d600382, + 0x2aefc4, + 0x2c358a, + 0x398a07, + 0x3477c6, + 0x24f3c7, + 0x244a03, + 0x2b8488, + 0x3a1f8b, + 0x2bd905, + 0x27c785, + 0x27c786, + 0x2dd704, + 0x3b5288, + 0x21d343, + 0x230004, + 0x230007, + 0x2f4a06, + 0x31fa06, + 0x2e200a, + 0x254844, + 0x31104a, + 0x5db364c6, + 0x3364c7, + 0x25ad87, + 0x273544, + 0x273549, + 0x250485, + 0x31cf4b, + 0x2e1283, + 0x225b43, + 0x5de20b43, + 0x2344c4, + 0x5e200982, + 0x3a3006, + 
0x5e6ca645, + 0x39d6c5, + 0x258c46, + 0x29cd44, + 0x5ea07bc2, + 0x247c44, + 0x5ee16f02, + 0x224645, + 0x23f4c4, + 0x228d83, + 0x5f610cc2, + 0x210cc3, + 0x267b86, + 0x5fa00a02, + 0x2073c8, + 0x228484, + 0x228486, + 0x37f306, + 0x25cb84, + 0x20eac5, + 0x21cfc8, + 0x220f87, + 0x2227c7, + 0x2227cf, + 0x28e846, + 0x309bc3, + 0x398184, + 0x233744, + 0x2101c3, + 0x227684, + 0x34fd84, + 0x5fe030c2, + 0x289a43, + 0x372e83, + 0x60207c02, + 0x25c503, + 0x322683, + 0x21578a, + 0x207c07, + 0x2534cc, + 0x253786, + 0x255246, + 0x256647, + 0x60632f47, + 0x25c889, + 0x22c504, + 0x25eb04, + 0x60a09f82, + 0x60e01282, + 0x2e23c6, + 0x32f304, + 0x2d3146, + 0x2333c8, + 0x239444, + 0x21b186, + 0x37c885, + 0x285048, + 0x205883, + 0x28a685, + 0x290cc3, + 0x3a26c3, + 0x3a26c4, + 0x212c03, + 0x6125fd82, + 0x61603a42, + 0x2e1149, + 0x299885, + 0x2a1084, + 0x2a4a45, + 0x212384, + 0x393607, + 0x353f05, + 0x2702c4, + 0x2702c8, + 0x2e61c6, + 0x2e9f84, + 0x2ede88, + 0x2f27c7, + 0x61a04042, + 0x316244, + 0x210284, + 0x224e87, + 0x61e04044, + 0x2c9002, + 0x6220ed42, + 0x221b83, + 0x2d37c4, + 0x29bb43, + 0x2aacc5, + 0x6262e642, + 0x2fddc5, + 0x23a382, + 0x390c85, + 0x372005, + 0x62a04e02, + 0x339b84, + 0x62e06a42, + 0x3aba46, + 0x3a7346, + 0x39d0c8, + 0x2be888, + 0x2f3a44, + 0x303685, + 0x316049, + 0x2eadc4, + 0x358084, + 0x2b6983, + 0x6320fd85, + 0x2c2547, + 0x21fc84, + 0x3ae54d, + 0x2f4682, + 0x3b35c3, + 0x2f4683, + 0x63601b42, + 0x396e45, + 0x223747, + 0x2b9604, + 0x394047, + 0x2c7989, + 0x2c36c9, + 0x275247, + 0x202bc3, + 0x3a7508, + 0x25b949, + 0x2f5487, + 0x2f5805, + 0x2f6706, + 0x2f6d46, + 0x2f6ec5, + 0x26a545, + 0x63a00d42, + 0x2b7685, + 0x2b3a08, + 0x2c08c6, + 0x63e872c7, + 0x2ec344, + 0x2b8047, + 0x2f9206, + 0x6420a402, + 0x35d5c6, + 0x2fc9ca, + 0x2fd245, + 0x646da942, + 0x64a4eb02, + 0x30ab06, + 0x386548, + 0x64e8a187, + 0x6523c902, + 0x215c43, + 0x20c246, + 0x229144, + 0x3b2f86, + 0x201746, + 0x34290a, + 0x325e45, + 0x3559c6, + 0x39ec43, + 0x39ec44, + 0x202c02, + 0x31c743, + 0x6560f042, + 0x30b743, + 0x38e544, + 0x2b2484, + 0x38668a, + 0x214c43, + 0x277208, + 0x250f0a, + 0x23f747, + 0x300986, + 0x260484, + 0x21c242, + 0x2a3702, + 0x65a02982, + 0x238b83, + 0x25ab47, + 0x202987, + 0x284d04, + 0x3a4f87, + 0x2e4e06, + 0x221287, + 0x303304, + 0x399d85, + 0x292705, + 0x65e1b2c2, + 0x3c50c6, + 0x223443, + 0x22a4c2, + 0x22a4c6, + 0x66222342, + 0x6660e982, + 0x3bb945, + 0x66a27882, + 0x66e01c42, + 0x334e85, + 0x2c51c5, + 0x355a85, + 0x289043, + 0x244185, + 0x2d9907, + 0x2feb45, + 0x370005, + 0x33d8c4, + 0x310806, + 0x381d44, + 0x67202a82, + 0x67ee7585, + 0x2a3ac7, + 0x34f408, + 0x261146, + 0x26114d, + 0x2674c9, + 0x2674d2, + 0x300585, + 0x309f43, + 0x6820c202, + 0x30ee44, + 0x39eb83, + 0x33b0c5, + 0x2fdf05, + 0x68630882, + 0x25f803, + 0x68a51b02, + 0x692d6142, + 0x69602242, + 0x2a1d45, + 0x394183, + 0x3c4f08, + 0x69a0ad42, + 0x69e0c842, + 0x26f446, + 0x317c4a, + 0x20e043, + 0x25d0c3, + 0x337d03, + 0x6aa04182, + 0x78e0c542, + 0x79600d82, + 0x206d02, + 0x35d3c9, + 0x2c18c4, + 0x2a9348, + 0x79af3dc2, + 0x79e01ac2, + 0x2ea5c5, + 0x238788, + 0x39e488, + 0x268b8c, + 0x23f683, + 0x7a263802, + 0x7a611d82, + 0x270d46, + 0x301805, + 0x2787c3, + 0x253c06, + 0x301946, + 0x29bc83, + 0x303b03, + 0x303f46, + 0x304b84, + 0x239a46, + 0x214a05, + 0x214a0a, + 0x24c1c4, + 0x305244, + 0x305b8a, + 0x7aa04982, + 0x24c345, + 0x30798a, + 0x308305, + 0x308bc4, + 0x308cc6, + 0x308e44, + 0x2198c6, + 0x7ae308c2, + 0x2f3446, + 0x341ac5, + 0x325cc7, + 0x3adf46, + 0x256844, + 0x2d2387, + 0x328446, + 0x241a05, + 0x241a07, + 0x3aed07, + 0x3aed0e, + 
0x2ebb06, + 0x23ce05, + 0x203f87, + 0x20ff43, + 0x20ff47, + 0x217945, + 0x22f484, + 0x22f5c2, + 0x246087, + 0x2f9104, + 0x244dc4, + 0x290d4b, + 0x220003, + 0x2e58c7, + 0x220004, + 0x2e6047, + 0x2903c3, + 0x33ca0d, + 0x3998c8, + 0x2297c4, + 0x2701c5, + 0x30b205, + 0x30b643, + 0x7b228382, + 0x30d5c3, + 0x30da83, + 0x321c04, + 0x27ab85, + 0x3c5247, + 0x39ecc6, + 0x37c1c3, + 0x22a60b, + 0x30e04b, + 0x2a5c8b, + 0x2fa5cb, + 0x2bd60a, + 0x30548b, + 0x3245cb, + 0x360a8c, + 0x384f4b, + 0x3c4351, + 0x3c5d4a, + 0x30f5cb, + 0x30f88c, + 0x30fb8b, + 0x31010a, + 0x311bca, + 0x312bce, + 0x31324b, + 0x31350a, + 0x3145d1, + 0x314a0a, + 0x314f0b, + 0x31544e, + 0x315d8c, + 0x316b8b, + 0x316e4e, + 0x3171cc, + 0x318d4a, + 0x31a04c, + 0x7b71a34a, + 0x31af48, + 0x31ba49, + 0x32368a, + 0x32390a, + 0x323b8b, + 0x328b4e, + 0x328ed1, + 0x330349, + 0x33058a, + 0x330bcb, + 0x332a4a, + 0x333296, + 0x334b8b, + 0x33784a, + 0x33818a, + 0x33908b, + 0x33ab49, + 0x33d389, + 0x33de0d, + 0x33e48b, + 0x33f38b, + 0x33fd4b, + 0x343d49, + 0x34438e, + 0x34500a, + 0x34a4ca, + 0x34a7ca, + 0x34afcb, + 0x34b80b, + 0x34bacd, + 0x34d18d, + 0x34d7d0, + 0x34dc8b, + 0x34f9cc, + 0x34fe8b, + 0x351e0b, + 0x35344e, + 0x353a0b, + 0x353a0d, + 0x35964b, + 0x35a0cf, + 0x35a48b, + 0x35acca, + 0x35b3c9, + 0x35ba89, + 0x35cd4b, + 0x35d00e, + 0x35e88b, + 0x35f64f, + 0x36160b, + 0x3618cb, + 0x361b8b, + 0x36238a, + 0x367149, + 0x36a18f, + 0x36f54c, + 0x37038c, + 0x37108e, + 0x37158f, + 0x37194e, + 0x3722d0, + 0x3726cf, + 0x3731ce, + 0x373f8c, + 0x374292, + 0x375211, + 0x375a0e, + 0x375e8e, + 0x3763ce, + 0x37674f, + 0x376b0e, + 0x376e93, + 0x377351, + 0x37778c, + 0x377a8e, + 0x377f0c, + 0x378513, + 0x378ed0, + 0x37970c, + 0x379a0c, + 0x379ecb, + 0x37ac8e, + 0x37b18b, + 0x37b5cb, + 0x37ca4c, + 0x3825ca, + 0x38474c, + 0x384a4c, + 0x384d49, + 0x387e0b, + 0x3880c8, + 0x388889, + 0x38888f, + 0x38a08b, + 0x7bb8afca, + 0x38e8cc, + 0x38fa89, + 0x390a48, + 0x39100b, + 0x39158b, + 0x39220a, + 0x39248b, + 0x39298c, + 0x393d48, + 0x39a40b, + 0x39d80b, + 0x3a114e, + 0x3a27cb, + 0x3a410b, + 0x3ae88b, + 0x3aeb49, + 0x3af08d, + 0x3b368a, + 0x3b45d7, + 0x3b5cd8, + 0x3b9749, + 0x3bb58b, + 0x3bc1d4, + 0x3bc6cb, + 0x3bcc4a, + 0x3bd14a, + 0x3bd3cb, + 0x3bf610, + 0x3bfa11, + 0x3c00ca, + 0x3c394d, + 0x3c404d, + 0x3c61cb, + 0x3c6a06, + 0x3c51c3, + 0x7bf74a03, + 0x2dd1c6, + 0x245a05, + 0x252087, + 0x324486, + 0x1601182, + 0x2cbe89, + 0x31abc4, + 0x2d8988, + 0x220a83, + 0x30ed87, + 0x201c02, + 0x2acb03, + 0x7c200dc2, + 0x2c4946, + 0x2c5c84, + 0x21a604, + 0x349a43, + 0x349a45, + 0x7cac2742, + 0x7cea8044, + 0x273487, + 0x7d22f442, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x20ae43, + 0x200742, + 0xcd588, + 0x202c42, + 0x30e843, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0x207c03, + 0x32eb56, + 0x356d13, + 0x3a4e09, + 0x38c988, + 0x340109, + 0x307b06, + 0x33e750, + 0x248c93, + 0x2f4ac8, + 0x2a5687, + 0x2b6f87, + 0x278c8a, + 0x38e5c9, + 0x342549, + 0x28b30b, + 0x31ca86, + 0x20850a, + 0x225f06, + 0x31a7c3, + 0x298445, + 0x207808, + 0x3abb0d, + 0x2ef1cc, + 0x35cac7, + 0x312f0d, + 0x21d0c4, + 0x23478a, + 0x23598a, + 0x235e4a, + 0x21fa07, + 0x243507, + 0x245fc4, + 0x27b986, + 0x3264c4, + 0x2e01c8, + 0x2ee049, + 0x2bfcc6, + 0x2bfcc8, + 0x24944d, + 0x2c3909, + 0x389ac8, + 0x3a2547, + 0x2f1c0a, + 0x2564c6, + 0x260fc7, + 0x306a04, + 0x214707, + 0x3105ca, + 0x378a0e, + 0x229985, + 0x3bfe0b, + 0x300389, + 0x267709, + 0x2b0587, + 0x3694ca, + 0x224dc7, + 0x2ff3c9, + 0x31e5c8, + 0x239e8b, + 0x2d8345, + 0x2309ca, + 0x227b09, + 0x3abe0a, + 0x2c64cb, + 0x21460b, + 
0x28b095, + 0x306745, + 0x3a25c5, + 0x2e9d0a, + 0x2a6f0a, + 0x300107, + 0x2388c3, + 0x2e2348, + 0x2cf00a, + 0x228486, + 0x25b789, + 0x285048, + 0x2e9f84, + 0x29bb49, + 0x2be888, + 0x36c607, + 0x2e7586, + 0x2a3ac7, + 0x2ac6c7, + 0x2450c5, + 0x2297cc, + 0x2701c5, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x20be03, + 0x20ec83, + 0x20ae43, + 0x241d03, + 0x20be03, + 0x20ec83, + 0xae43, + 0x285603, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0xcd588, + 0x202c42, + 0x209d42, + 0x236082, + 0x201102, + 0x2013c2, + 0x2db482, + 0x460be03, + 0x237583, + 0x203d43, + 0x30e843, + 0x214bc3, + 0x21f743, + 0x2d1206, + 0x20ec83, + 0x241d03, + 0x238843, + 0xcd588, + 0x323584, + 0x322dc7, + 0x34a403, + 0x2402c4, + 0x21b903, + 0x283cc3, + 0x30e843, + 0x15da87, + 0x1221c4, + 0x121b83, + 0xf45, + 0x200742, + 0xb6c03, + 0x5a02c42, + 0x1488d09, + 0x891cd, + 0x8950d, + 0x236082, + 0x6ff84, + 0xf89, + 0x200342, + 0x5f8d588, + 0xe9484, + 0xcd588, + 0x1426502, + 0x1508546, + 0x233603, + 0x2b8283, + 0x660be03, + 0x234784, + 0x6a37583, + 0x6f0e843, + 0x207d02, + 0x26ff84, + 0x20ec83, + 0x2fbbc3, + 0x2056c2, + 0x241d03, + 0x21c4c2, + 0x2f1f03, + 0x200a02, + 0x29d2c3, + 0x26f883, + 0x20fc42, + 0xcd588, + 0x233603, + 0x2fbbc3, + 0x2056c2, + 0x2f1f03, + 0x200a02, + 0x29d2c3, + 0x26f883, + 0x20fc42, + 0x2f1f03, + 0x200a02, + 0x29d2c3, + 0x26f883, + 0x20fc42, + 0x20be03, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x226444, + 0x20ec83, + 0x241d03, + 0x204bc2, + 0x219543, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x20ec83, + 0x241d03, + 0x2f5805, + 0x230882, + 0x200742, + 0xcd588, + 0x1455908, + 0x1367ca, + 0x30e843, + 0x200001, + 0x202081, + 0x200ec1, + 0x200f01, + 0x200f41, + 0x20d701, + 0x312181, + 0x203801, + 0x24b241, + 0x2021c1, + 0x200101, + 0x200301, + 0x117485, + 0xcd588, + 0x200781, + 0x2014c1, + 0x200041, + 0x200141, + 0x201401, + 0x200901, + 0x200541, + 0x200c01, + 0x200a81, + 0x200641, + 0x200081, + 0x2001c1, + 0x200341, + 0x201681, + 0x20ab41, + 0x2002c1, + 0x200a01, + 0x200401, + 0x200441, + 0x201ac1, + 0x203f81, + 0x20d601, + 0x201181, + 0x200dc1, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x20be03, + 0x237583, + 0x200342, + 0x241d03, + 0x15da87, + 0x1f847, + 0x29546, + 0x4160a, + 0x88348, + 0x5a588, + 0x5aa47, + 0x86, + 0xd61c5, + 0x14a345, + 0x7dac6, + 0x157206, + 0x28b304, + 0x340547, + 0xcd588, + 0x2d2484, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x31a548, + 0x31e084, + 0x2374c4, + 0x22aa44, + 0x270c47, + 0x2cde07, + 0x20be03, + 0x23d74b, + 0x31b7ca, + 0x31cd47, + 0x2fc048, + 0x3255c8, + 0x237583, + 0x346c47, + 0x203d43, + 0x37c208, + 0x335049, + 0x26ff84, + 0x214bc3, + 0x2dce48, + 0x21f743, + 0x2cb6ca, + 0x2d1206, + 0x3a0547, + 0x20ec83, + 0x2da606, + 0x309308, + 0x241d03, + 0x28d806, + 0x2e394d, + 0x2e49c8, + 0x2ea0cb, + 0x331e86, + 0x348087, + 0x20f605, + 0x2ef98a, + 0x22bfc5, + 0x36210a, + 0x230882, + 0x203f83, + 0x244dc4, + 0x2021c6, + 0x3a7a03, + 0x2af043, + 0x24be43, + 0x23b003, + 0x349183, + 0x200582, + 0x2d7285, + 0x2a6589, + 0x245743, + 0x205583, + 0x202fc3, + 0x200301, + 0x2a1a85, + 0x39da83, + 0x2053c3, + 0x22aa44, + 0x326443, + 0x214948, + 0x2ec443, + 0x302e8d, + 0x2ebbc8, + 0x21ab46, + 0x31c783, + 0x378983, + 0x381cc3, + 0xaa0be03, + 0x236dc8, + 0x23d744, + 0x246d03, + 0x2022c6, + 0x249bc8, + 
0x202e03, + 0x2ef9c3, + 0x2319c3, + 0x237583, + 0x21d8c3, + 0x21e903, + 0x21a303, + 0x31c703, + 0x2b25c3, + 0x225783, + 0x370645, + 0x256c04, + 0x258107, + 0x329982, + 0x25a303, + 0x25d486, + 0x25ed03, + 0x25f3c3, + 0x276543, + 0x202043, + 0x323283, + 0x269687, + 0xaf0e843, + 0x2363c3, + 0x2096c3, + 0x204d03, + 0x26ff83, + 0x2f3783, + 0x374ac5, + 0x363fc3, + 0x246889, + 0x20b0c3, + 0x2fe203, + 0xb2527c3, + 0x286d03, + 0x21cd08, + 0x2a64c6, + 0x200706, + 0x29aa46, + 0x27a5c7, + 0x200c83, + 0x20be83, + 0x21f743, + 0x288446, + 0x21a202, + 0x29ea43, + 0x32dd05, + 0x20ec83, + 0x2a2e47, + 0x160ae43, + 0x24e483, + 0x21fa83, + 0x225e03, + 0x241d03, + 0x212e46, + 0x31d286, + 0x36aa43, + 0x22ba83, + 0x219543, + 0x253743, + 0x303b83, + 0x2f0603, + 0x2f20c3, + 0x34f085, + 0x24f3c3, + 0x2d3246, + 0x23eb08, + 0x225b43, + 0x341789, + 0x33a308, + 0x2110c8, + 0x21a185, + 0x32a38a, + 0x35400a, + 0x37cd8b, + 0x37d408, + 0x2fb903, + 0x2f2103, + 0x33b1c3, + 0x366d88, + 0x2f4e83, + 0x39ec44, + 0x261983, + 0x202983, + 0x22d483, + 0x26fcc3, + 0x238843, + 0x230882, + 0x22d0c3, + 0x23f683, + 0x305403, + 0x3065c4, + 0x244dc4, + 0x3be143, + 0xcd588, + 0x200742, + 0x200602, + 0x200582, + 0x203402, + 0x2023c2, + 0x200782, + 0x238c02, + 0x201b02, + 0x202542, + 0x2000c2, + 0x225242, + 0x20d682, + 0x26cd82, + 0x206f02, + 0x2db482, + 0x20b182, + 0x201f82, + 0x2057c2, + 0x2f5f42, + 0x208102, + 0x200982, + 0x219e82, + 0x207bc2, + 0x207c02, + 0x201282, + 0x20fd82, + 0x201c42, + 0x742, + 0x602, + 0x582, + 0x3402, + 0x23c2, + 0x782, + 0x38c02, + 0x1b02, + 0x2542, + 0xc2, + 0x25242, + 0xd682, + 0x6cd82, + 0x6f02, + 0xdb482, + 0xb182, + 0x1f82, + 0x57c2, + 0xf5f42, + 0x8102, + 0x982, + 0x19e82, + 0x7bc2, + 0x7c02, + 0x1282, + 0xfd82, + 0x1c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x3f82, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x241d03, + 0xc60be03, + 0x30e843, + 0x21f743, + 0xaff03, + 0x223b82, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0xaff03, + 0x241d03, + 0xdc2, + 0x142f49, + 0x202382, + 0x15bda05, + 0x2eaa02, + 0xcd588, + 0x2c42, + 0x23bfc2, + 0x200482, + 0x244482, + 0x21f682, + 0x253982, + 0x14a345, + 0x203082, + 0x2056c2, + 0x20c502, + 0x203042, + 0x20b182, + 0x392a82, + 0x20ed42, + 0x24eb42, + 0x15da87, + 0x120a8d, + 0xd6249, + 0x6898b, + 0xd97c8, + 0x60b89, + 0xfeec6, + 0x30e843, + 0xcd588, + 0x1221c4, + 0x121b83, + 0xf45, + 0xcd588, + 0x5b646, + 0xf89, + 0xab07, + 0x200742, + 0x28b304, + 0x202c42, + 0x20be03, + 0x209d42, + 0x237583, + 0x202542, + 0x2d2484, + 0x214bc3, + 0x254ac2, + 0x20ec83, + 0x200342, + 0x241d03, + 0x3a25c6, + 0x32414f, + 0x70ec03, + 0xcd588, + 0x202c42, + 0x203d43, + 0x30e843, + 0x21f743, + 0xae43, + 0x14ef74b, + 0x141650a, + 0x14eca47, + 0x78d4b, + 0xd7e45, + 0x15da87, + 0x202c42, + 0x20be03, + 0x30e843, + 0x20ec83, + 0x200742, + 0x211a42, + 0x209342, + 0xfe0be03, + 0x2442c2, + 0x237583, + 0x226d02, + 0x22ab02, + 0x30e843, + 0x25cd82, + 0x251942, + 0x2a8002, + 0x211742, + 0x28d302, + 0x2029c2, + 0x200902, + 0x2ebfc2, + 0x278142, + 0x25c982, + 0x2ad9c2, + 0x2fcdc2, + 0x223482, + 0x23d082, + 0x21f743, + 0x205a02, + 0x20ec83, + 0x211e82, + 0x2c9fc2, + 0x241d03, + 0x2457c2, + 0x207c02, + 0x209f82, + 0x203a42, + 0x204e02, + 0x2da942, + 0x21b2c2, + 0x251b02, + 0x2234c2, + 0x31350a, + 0x35acca, + 0x38bf0a, + 0x3c6b82, + 0x20f2c2, + 0x374a82, + 0x103358c9, + 0x1072f70a, + 0x14328c7, + 0x10a03fc2, + 0x1410983, + 0x3342, + 0x12f70a, + 0x253404, + 0x1120be03, + 0x237583, + 0x254a04, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x21f743, + 
0x20ec83, + 0x1aec5, + 0x20ae43, + 0x241d03, + 0x24f3c3, + 0x203f83, + 0xcd588, + 0x1400004, + 0x149845, + 0x142f49, + 0xa8ca, + 0x119fc2, + 0x19cbc6, + 0x187251, + 0x11b358c9, + 0x1498c8, + 0x1c1948, + 0x1fbc7, + 0x282, + 0x11748b, + 0x18c40a, + 0x844a, + 0x2aa47, + 0xcd588, + 0x10c788, + 0xd547, + 0x18419a4b, + 0x1c787, + 0x4c2, + 0x5e87, + 0x23a8a, + 0x1f8cf, + 0x8308f, + 0xefa02, + 0x2c42, + 0x173908, + 0xf698a, + 0x12b48, + 0x5fcc8, + 0xd3708, + 0x2e02, + 0x1bda8f, + 0x9dc8b, + 0x7e948, + 0x3c2c7, + 0x127c0a, + 0xf400b, + 0x78449, + 0x127b07, + 0x12a48, + 0x1541cc, + 0x3a347, + 0x17a28a, + 0x67008, + 0xf6f8e, + 0x2954e, + 0x2a88b, + 0x2e28b, + 0x30e8b, + 0x50b89, + 0xe32cb, + 0xeb5cd, + 0x17d18b, + 0x198c8d, + 0x19900d, + 0x3cc4a, + 0x44c0b, + 0x4638b, + 0x49ec5, + 0x18828e50, + 0x15770f, + 0x3b4cf, + 0xfb1cd, + 0x39610, + 0xa6c2, + 0x18e071c8, + 0x1f6c8, + 0x192ec405, + 0x5400b, + 0x12e350, + 0x59c88, + 0x12c4a, + 0x2e449, + 0x66007, + 0x66347, + 0x66507, + 0x66887, + 0x67347, + 0x67947, + 0x68187, + 0x68547, + 0x68e87, + 0x69187, + 0x69847, + 0x69a07, + 0x69bc7, + 0x69d87, + 0x6a087, + 0x6a687, + 0x6af47, + 0x6b707, + 0x6bcc7, + 0x6bf87, + 0x6c147, + 0x6c447, + 0x6cc47, + 0x6ce47, + 0x6dd87, + 0x6df47, + 0x6e107, + 0x6ebc7, + 0x6f0c7, + 0x6fd87, + 0x70687, + 0x71147, + 0x71647, + 0x71807, + 0x71c07, + 0x72447, + 0x726c7, + 0x72ac7, + 0x72c87, + 0x72e47, + 0x73287, + 0x73e87, + 0x743c7, + 0x74947, + 0x74b07, + 0x74e87, + 0x75407, + 0xd0c2, + 0x5fdca, + 0xdc547, + 0x84785, + 0xb3111, + 0x10ac6, + 0x10cc0a, + 0x17378a, + 0x5b646, + 0xcb0b, + 0x1402, + 0x34111, + 0xb29c9, + 0x948c9, + 0xebfc2, + 0x71e8a, + 0xa5a89, + 0xa61cf, + 0xa67ce, + 0xa7708, + 0x552c2, + 0x549, + 0x18b4ce, + 0xfc6cc, + 0xdbe0f, + 0x1a814e, + 0x1840c, + 0x25589, + 0x26751, + 0x2f988, + 0x1109d2, + 0x1115cd, + 0x1545cd, + 0x43f8b, + 0x4bad5, + 0x52c49, + 0x5438a, + 0x5ee89, + 0x6b310, + 0x7cc8b, + 0x85ecf, + 0xf0c0b, + 0x16130c, + 0x1b2610, + 0x9208a, + 0x9e90d, + 0x9fc4e, + 0xa9bca, + 0xab6cc, + 0xac394, + 0xb2651, + 0xbb04b, + 0xe1ecf, + 0xca50d, + 0x1a720e, + 0x16c4cc, + 0x18618c, + 0xb234b, + 0xb428e, + 0xb4d50, + 0xb584b, + 0xbaa8d, + 0xbb4cf, + 0xbef4c, + 0xbfb4e, + 0xc0411, + 0xdff4c, + 0x10d8c7, + 0xc738d, + 0xd000c, + 0xd65d0, + 0xdb80d, + 0x18acc7, + 0xe6310, + 0xf9348, + 0xfd44b, + 0x17d9cf, + 0x142188, + 0x10ce0d, + 0x190c10, + 0xf5f89, + 0x196af346, + 0xb0303, + 0xb5b05, + 0x9602, + 0x143709, + 0x5c40a, + 0x106606, + 0x2098a, + 0x1991f309, + 0x264c3, + 0xd2711, + 0xd2b49, + 0xd3ec7, + 0x1873cb, + 0xdae90, + 0xdb34c, + 0xdc2c8, + 0xdcc45, + 0x11e748, + 0x1afe8a, + 0x26587, + 0x140947, + 0x1382, + 0x12f04a, + 0x3b809, + 0x71505, + 0xa2cca, + 0x8a0cf, + 0x4794b, + 0x174b8c, + 0x1a252, + 0x9df05, + 0xdf0c8, + 0x13a60a, + 0x19ee8f05, + 0x17478c, + 0x12c443, + 0x192a82, + 0xf258a, + 0x14f2d8c, + 0x3a6c8, + 0x198e48, + 0x15da07, + 0x16f02, + 0xa02, + 0x49fd0, + 0x653c7, + 0x1282, + 0x333cf, + 0x7dac6, + 0x79a8e, + 0xdeb8b, + 0x6e308, + 0xa9dc9, + 0xf5012, + 0x18998d, + 0x1be608, + 0x68849, + 0x6a20d, + 0x6c5c9, + 0x6c98b, + 0x6e4c8, + 0x73c88, + 0x76248, + 0x79dc9, + 0x79fca, + 0x7b48c, + 0x17010a, + 0x103bc7, + 0x2fdcd, + 0xf7a8b, + 0x11a9cc, + 0x1979c8, + 0x4d3c9, + 0x13d8d0, + 0xc842, + 0x521cd, + 0x4182, + 0xc542, + 0x103b0a, + 0x10cb0a, + 0x10ec8b, + 0x4654c, + 0x10c28a, + 0x10c50e, + 0x121ccd, + 0xb6a08, + 0xdc2, + 0x11e0340e, + 0x1272184e, + 0x12f4960a, + 0x13742c0e, + 0x13f374ce, + 0x147ac40c, + 0x14328c7, + 0x14328c9, + 0x1410983, + 0x14eb784c, + 0x15727309, + 0x15f69bc9, + 0x1660a6c9, + 0x3342, + 
0x3351, + 0x121791, + 0x14954d, + 0x142b51, + 0x137411, + 0x1ac34f, + 0xb778f, + 0x12724c, + 0x169b0c, + 0xa60c, + 0x1654cd, + 0x10e595, + 0x5a00c, + 0x1ba48c, + 0x138c90, + 0x155e8c, + 0x15dc0c, + 0x17a659, + 0x180a19, + 0x19f3d9, + 0x1b57d4, + 0x1bbcd4, + 0x3ed4, + 0x4ed4, + 0xb814, + 0x16e5a0c9, + 0x17404189, + 0x17fba549, + 0x1222fb89, + 0x3342, + 0x12a2fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x1322fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x13a2fb89, + 0x3342, + 0x1422fb89, + 0x3342, + 0x14a2fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x1522fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x15a2fb89, + 0x3342, + 0x1622fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x16a2fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x1722fb89, + 0x3342, + 0x17a2fb89, + 0x3342, + 0x1822fb89, + 0x3342, + 0x3eca, + 0x3342, + 0x187245, + 0x18c404, + 0x340e, + 0x12184e, + 0x14960a, + 0x142c0e, + 0x1374ce, + 0x1ac40c, + 0xb784c, + 0x127309, + 0x169bc9, + 0xa6c9, + 0x5a0c9, + 0x4189, + 0x1ba549, + 0x10e78d, + 0x5189, + 0xbac9, + 0x116a84, + 0x118c44, + 0x13aa44, + 0x18e7c4, + 0x79004, + 0x98884, + 0x477c4, + 0x143c44, + 0x1fbc4, + 0x157cd03, + 0xa6c2, + 0x121cc3, + 0x2e02, + 0x200742, + 0x202c42, + 0x209d42, + 0x208882, + 0x202542, + 0x200342, + 0x200a02, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff83, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x20ec83, + 0x241d03, + 0x1a9c3, + 0x30e843, + 0x6ff84, + 0x200742, + 0x2b6c03, + 0x1be0be03, + 0x2394c7, + 0x30e843, + 0x20f003, + 0x226444, + 0x20ec83, + 0x241d03, + 0x22d50a, + 0x3a25c5, + 0x219543, + 0x20e982, + 0xcd588, + 0xcd588, + 0x2c42, + 0x129242, + 0x1c74660b, + 0x5fc5, + 0x1f8c5, + 0xf9fc6, + 0x1221c4, + 0x121b83, + 0xf45, + 0x117485, + 0xcd588, + 0x1c787, + 0xbe03, + 0x1ce41447, + 0x143146, + 0x1d149445, + 0x143207, + 0xf84a, + 0xf708, + 0x13407, + 0x68348, + 0x98647, + 0xf28f, + 0x47f87, + 0x4e786, + 0x12e350, + 0x12cf0f, + 0x1c009, + 0x106684, + 0x1d5432ce, + 0xa978c, + 0xf420a, + 0x785c7, + 0xd9f8a, + 0x11e909, + 0xada0c, + 0x1bdf0a, + 0x5cc0a, + 0xf89, + 0x106606, + 0x7868a, + 0x11d84a, + 0x9a209, + 0xd1fc8, + 0xd22c6, + 0xd6c0d, + 0xb7cc5, + 0xab07, + 0xfb709, + 0x1a3207, + 0x10bd94, + 0xfdb4b, + 0x7e78a, + 0xa358d, + 0xf283, + 0xf283, + 0x29546, + 0xf283, + 0xb6c03, + 0xcd588, + 0x2c42, + 0x54a04, + 0x5da83, + 0xf5805, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x205583, + 0x20be03, + 0x237583, + 0x203d43, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x294a83, + 0x203f83, + 0x205583, + 0x28b304, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x235cc3, + 0x20be03, + 0x237583, + 0x208883, + 0x203d43, + 0x30e843, + 0x26ff84, + 0x3c32c3, + 0x20be83, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x219543, + 0x20c283, + 0x1f20be03, + 0x237583, + 0x24e683, + 0x30e843, + 0x211343, + 0x20be83, + 0x241d03, + 0x2057c3, + 0x317f04, + 0xcd588, + 0x1fa0be03, + 0x237583, + 0x2a77c3, + 0x30e843, + 0x21f743, + 0x226444, + 0x20ec83, + 0x241d03, + 0x232f43, + 0xcd588, + 0x2020be03, + 0x237583, + 0x203d43, + 0x20ae43, + 0x241d03, + 0xcd588, + 0x14328c7, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x226444, + 0x20ec83, + 0x241d03, + 0x142f49, + 0x117485, + 0x15da87, + 0x10bfcb, + 0xd2f44, + 0xb7cc5, + 0x1455908, + 0xa7e0d, + 0x21676405, + 0x8f204, + 0x10ec3, + 0xf5e85, + 0x31cc45, + 0xcd588, + 0xf282, + 0x3a283, + 0xefec6, + 0x31b0c8, + 0x397247, + 0x28b304, + 0x346046, + 0x3699c6, + 0xcd588, + 0x312ec3, + 0x23aec9, + 0x265555, + 0x6555f, + 0x20be03, + 0x3ba052, + 0x10db06, + 0x14fc85, + 0x12c4a, + 0x2e449, + 0x3b9e0f, + 0x2d2484, + 0x225285, 
+ 0x2fdfd0, + 0x38cb87, + 0x20ae43, + 0x310f08, + 0x157146, + 0x2a47ca, + 0x22d244, + 0x2e8943, + 0x3a25c6, + 0x20e982, + 0x3987cb, + 0xae43, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x2f0ec3, + 0x202c42, + 0xee203, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20f003, + 0x227b03, + 0x241d03, + 0x202c42, + 0x20be03, + 0x237583, + 0x20ec83, + 0xae43, + 0x241d03, + 0x200742, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x1f8c5, + 0x28b304, + 0x20be03, + 0x237583, + 0x21a484, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x20be03, + 0x237583, + 0x203d43, + 0x204d03, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x357d43, + 0x3cf83, + 0xf003, + 0x20ec83, + 0x241d03, + 0x31350a, + 0x333049, + 0x3524cb, + 0x352b4a, + 0x35acca, + 0x3686cb, + 0x37bfca, + 0x3825ca, + 0x38bf0a, + 0x38c18b, + 0x3afbc9, + 0x3b1a0a, + 0x3b1d8b, + 0x3bc98b, + 0x3c5b0a, + 0x20be03, + 0x237583, + 0x203d43, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0x18754b, + 0x60308, + 0x14f209, + 0xcd588, + 0x20be03, + 0x266004, + 0x206302, + 0x226444, + 0x201485, + 0x205583, + 0x28b304, + 0x20be03, + 0x23d744, + 0x237583, + 0x254a04, + 0x2d2484, + 0x26ff84, + 0x20be83, + 0x20ec83, + 0x241d03, + 0x252385, + 0x235cc3, + 0x219543, + 0x2b5d83, + 0x2702c4, + 0x2020c4, + 0x3c0885, + 0xcd588, + 0x320f04, + 0x39db46, + 0x2010c4, + 0x202c42, + 0x38fd87, + 0x256087, + 0x252944, + 0x25e5c5, + 0x2dba05, + 0x232585, + 0x26ff84, + 0x27a688, + 0x23c806, + 0x3c5f88, + 0x278185, + 0x2d8345, + 0x251304, + 0x241d03, + 0x2e9484, + 0x367486, + 0x3a26c3, + 0x2702c4, + 0x362205, + 0x26e984, + 0x23fd84, + 0x20e982, + 0x397746, + 0x3a4b06, + 0x301805, + 0x200742, + 0x2b6c03, + 0x27e02c42, + 0x207344, + 0x202542, + 0x21f743, + 0x23a3c2, + 0x20ec83, + 0x200342, + 0x207c03, + 0x203f83, + 0xcd588, + 0xcd588, + 0x30e843, + 0x200742, + 0x28a02c42, + 0x30e843, + 0x2574c3, + 0x3c32c3, + 0x2168c4, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x200742, + 0x29202c42, + 0x20be03, + 0x20ec83, + 0xae43, + 0x241d03, + 0x982, + 0x20c202, + 0x230882, + 0x20f003, + 0x2e2d83, + 0x200742, + 0x117485, + 0xcd588, + 0x15da87, + 0x202c42, + 0x237583, + 0x254a04, + 0x206c03, + 0x30e843, + 0x204d03, + 0x21f743, + 0x20ec83, + 0x207783, + 0x241d03, + 0x2388c3, + 0xb5cd3, + 0x1b9994, + 0x15da87, + 0x102dc6, + 0x5c60b, + 0x29546, + 0x5a3c7, + 0x2809, + 0x195d4a, + 0x8820d, + 0x12078c, + 0x104fca, + 0x14a345, + 0xf888, + 0x7dac6, + 0x6ff06, + 0x157206, + 0x20a6c2, + 0x1c170c, + 0x18c5c7, + 0x282d1, + 0x20be03, + 0x682c5, + 0x8808, + 0x22644, + 0x2a507646, + 0xb3106, + 0xd95c6, + 0x8d5ca, + 0x19dac3, + 0x2aa48c44, + 0x27c5, + 0x15cc83, + 0x2ae38a07, + 0x1aec5, + 0xcbcc, + 0xed348, + 0x6f6cb, + 0x2b25168c, + 0x140d6c3, + 0xb8888, + 0x9db09, + 0x3ff48, + 0x14208c6, + 0x2b76d609, + 0xd7e4a, + 0x10d08, + 0xf9fc8, + 0x1fbc4, + 0x118b45, + 0x6f807, + 0x2ba6f803, + 0x2bf39c86, + 0x2c2e9d04, + 0x2c790207, + 0xf9fc4, + 0xf9fc4, + 0xf9fc4, + 0xf9fc4, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x200742, + 0x202c42, + 0x30e843, + 0x207d02, + 0x20ec83, + 0x241d03, + 0x207c03, + 0x37158f, + 0x37194e, + 0xcd588, + 0x20be03, + 0x49a07, + 0x237583, + 0x30e843, + 0x214bc3, + 0x20ec83, + 0x241d03, + 0x220443, + 0x322887, + 0x203d02, + 0x292889, + 0x200602, + 0x24a2cb, + 
0x2cf44a, + 0x28d009, + 0x200182, + 0x3418c6, + 0x235295, + 0x24a415, + 0x236793, + 0x24a993, + 0x203942, + 0x222dc5, + 0x3ab48c, + 0x27410b, + 0x2a2205, + 0x203402, + 0x2f2542, + 0x37e706, + 0x200282, + 0x261bc6, + 0x212ecd, + 0x21ac4c, + 0x228ec4, + 0x200cc2, + 0x2149c2, + 0x310d88, + 0x2023c2, + 0x211446, + 0x35c704, + 0x235455, + 0x236913, + 0x2108c3, + 0x32508a, + 0x20df47, + 0x30eec9, + 0x2d9d07, + 0x314902, + 0x200882, + 0x3b4b46, + 0x2099c2, + 0xcd588, + 0x210702, + 0x200302, + 0x217a07, + 0x336087, + 0x21c485, + 0x2004c2, + 0x2da6c7, + 0x220488, + 0x204002, + 0x2f21c2, + 0x230502, + 0x203cc2, + 0x23e988, + 0x20bf83, + 0x25dc48, + 0x20bf8d, + 0x237c03, + 0x23bc48, + 0x237c0f, + 0x237fce, + 0x38feca, + 0x2d1311, + 0x2d1790, + 0x38360d, + 0x38394c, + 0x3452c7, + 0x325207, + 0x346109, + 0x228fc2, + 0x200782, + 0x25becc, + 0x25c1cb, + 0x203c02, + 0x2b2506, + 0x2010c2, + 0x202642, + 0x2efa02, + 0x202c42, + 0x231fc4, + 0x240647, + 0x230a42, + 0x245207, + 0x2475c7, + 0x21bc02, + 0x21b282, + 0x2498c5, + 0x204ac2, + 0x2e72ce, + 0x2a384d, + 0x237583, + 0x28400e, + 0x3b868d, + 0x348003, + 0x202ec2, + 0x2817c4, + 0x238c42, + 0x202e82, + 0x372a45, + 0x37b407, + 0x24d902, + 0x208882, + 0x254607, + 0x257688, + 0x329982, + 0x29df86, + 0x25bd4c, + 0x25c08b, + 0x212c42, + 0x26208f, + 0x262450, + 0x26284f, + 0x262c15, + 0x263154, + 0x26364e, + 0x2639ce, + 0x263d4f, + 0x26410e, + 0x264494, + 0x264993, + 0x264e4d, + 0x2755c9, + 0x289843, + 0x201802, + 0x215f45, + 0x206c06, + 0x202542, + 0x344e47, + 0x30e843, + 0x201402, + 0x36dfc8, + 0x2d1551, + 0x2d1990, + 0x200c42, + 0x270f87, + 0x205842, + 0x341287, + 0x209602, + 0x348f89, + 0x37e6c7, + 0x2a4b48, + 0x307486, + 0x2e2c83, + 0x326e05, + 0x20e402, + 0x202682, + 0x3b4f45, + 0x3c1485, + 0x200f82, + 0x214d03, + 0x26ea07, + 0x208007, + 0x2085c2, + 0x22e684, + 0x20b4c3, + 0x20b4c9, + 0x20f108, + 0x201542, + 0x204482, + 0x2e3547, + 0x33d705, + 0x293988, + 0x222a87, + 0x201cc3, + 0x298106, + 0x38348d, + 0x38380c, + 0x2e0646, + 0x200482, + 0x26f582, + 0x201e42, + 0x237a8f, + 0x237e8e, + 0x2dba87, + 0x200b82, + 0x3517c5, + 0x3517c6, + 0x203282, + 0x205a02, + 0x28ad86, + 0x292ac3, + 0x3411c6, + 0x2c3ec5, + 0x2c3ecd, + 0x2c4495, + 0x2c4e8c, + 0x2c59cd, + 0x2c5d92, + 0x20d682, + 0x26cd82, + 0x2047c2, + 0x21ce86, + 0x2fc586, + 0x201382, + 0x206c86, + 0x20c502, + 0x20d245, + 0x2013c2, + 0x2a3949, + 0x21d70c, + 0x21da4b, + 0x200342, + 0x258508, + 0x20cb42, + 0x206f02, + 0x271946, + 0x22fb05, + 0x31f507, + 0x250d85, + 0x2982c5, + 0x249a82, + 0x204c02, + 0x20b182, + 0x2dc107, + 0x24f4cd, + 0x24f84c, + 0x34f687, + 0x22bac2, + 0x201f82, + 0x23d488, + 0x343888, + 0x303d48, + 0x30cdc4, + 0x2b4507, + 0x2e3c83, + 0x24e882, + 0x204882, + 0x2e6b09, + 0x2f7387, + 0x2057c2, + 0x271d45, + 0x248602, + 0x209942, + 0x2bca43, + 0x2bca46, + 0x2f0602, + 0x2f1e82, + 0x201442, + 0x3b33c6, + 0x3454c7, + 0x205e42, + 0x200382, + 0x25da8f, + 0x283e4d, + 0x38b8ce, + 0x3b850c, + 0x2017c2, + 0x200502, + 0x3072c5, + 0x311d86, + 0x209002, + 0x208102, + 0x200982, + 0x222a04, + 0x2dcdc4, + 0x3c23c6, + 0x200a02, + 0x2b7307, + 0x231d03, + 0x231d08, + 0x2326c8, + 0x243e07, + 0x2ecbc6, + 0x204042, + 0x23e683, + 0x23e687, + 0x28a8c6, + 0x2f3045, + 0x30d148, + 0x206a42, + 0x341687, + 0x20fd82, + 0x2f4682, + 0x20c142, + 0x2f1149, + 0x20a402, + 0x201742, + 0x24adc3, + 0x325ec7, + 0x2040c2, + 0x21d88c, + 0x21db8b, + 0x2e06c6, + 0x35cbc5, + 0x227882, + 0x201c42, + 0x2ba046, + 0x22e983, + 0x331547, + 0x20cb82, + 0x202a82, + 0x235115, + 0x24a5d5, + 0x236653, + 0x24ab13, + 0x25d207, + 0x274548, + 0x274550, + 
0x28744f, + 0x373ad3, + 0x28cdd2, + 0x292450, + 0x2b350f, + 0x2fd6d2, + 0x3af491, + 0x2af493, + 0x3938d2, + 0x2c3b0f, + 0x2cd74e, + 0x2cf252, + 0x2d09d1, + 0x2d3b0f, + 0x2d528e, + 0x2dc811, + 0x2dd7d0, + 0x2ed512, + 0x2f0f51, + 0x2f2206, + 0x2f3907, + 0x38e407, + 0x200d02, + 0x27efc5, + 0x3713c7, + 0x230882, + 0x20f6c2, + 0x22d0c5, + 0x200443, + 0x200446, + 0x24f68d, + 0x24f9cc, + 0x206d02, + 0x3ab30b, + 0x273fca, + 0x22358a, + 0x2b9489, + 0x2e530b, + 0x222bcd, + 0x2fe44c, + 0x2ec88a, + 0x27500c, + 0x294d4b, + 0x2a204c, + 0x2f968b, + 0x2b9e83, + 0x2f4f06, + 0x3b9942, + 0x2f3dc2, + 0x20e343, + 0x201ac2, + 0x207203, + 0x24ec86, + 0x262dc7, + 0x2ad706, + 0x2f6b48, + 0x343588, + 0x2ca146, + 0x211d82, + 0x3011cd, + 0x30150c, + 0x2d2547, + 0x304e07, + 0x23c242, + 0x219742, + 0x23e602, + 0x257a42, + 0x202c42, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x226444, + 0x20ec83, + 0x241d03, + 0x207c03, + 0x200742, + 0x201482, + 0x2e68ecc5, + 0x2ea8e4c5, + 0x2efb3086, + 0xcd588, + 0x2f2afb05, + 0x202c42, + 0x209d42, + 0x2f726285, + 0x2fa7cb85, + 0x2fe7d647, + 0x302867c9, + 0x30667d84, + 0x202542, + 0x201402, + 0x30b0dec5, + 0x30e95f49, + 0x31327988, + 0x316ac205, + 0x31af0707, + 0x31e21cc8, + 0x322def85, + 0x3266d246, + 0x32b6d849, + 0x32ed4ec8, + 0x332bf988, + 0x3369658a, + 0x33a75e04, + 0x33f7c545, + 0x342bc308, + 0x34727e05, + 0x217f42, + 0x34a061c3, + 0x34ea2606, + 0x35311408, + 0x356eee46, + 0x35b643c8, + 0x35eb6306, + 0x363c2f44, + 0x2032c2, + 0x366f1587, + 0x36aa8644, + 0x36e77e87, + 0x3723ecc7, + 0x200342, + 0x3769ae85, + 0x37a403c4, + 0x37ee1787, + 0x383a3387, + 0x38681606, + 0x38a7d205, + 0x38e96047, + 0x392e5a48, + 0x396162c7, + 0x39b94949, + 0x39ec51c5, + 0x3a2b4107, + 0x3a68e306, + 0x3aa941c8, + 0x227d8d, + 0x251989, + 0x272fcb, + 0x27ac8b, + 0x2a78cb, + 0x2da98b, + 0x311f8b, + 0x31224b, + 0x312889, + 0x31378b, + 0x313a4b, + 0x313fcb, + 0x314c8a, + 0x3151ca, + 0x3157cc, + 0x31938b, + 0x319dca, + 0x33080a, + 0x33b28e, + 0x33be8e, + 0x33c20a, + 0x33e14a, + 0x33eb4b, + 0x33ee0b, + 0x33fa8b, + 0x35edcb, + 0x35f3ca, + 0x36008b, + 0x36034a, + 0x3605ca, + 0x36084a, + 0x37d74b, + 0x3856cb, + 0x388f8e, + 0x38930b, + 0x391f4b, + 0x392e0b, + 0x39a6ca, + 0x39a949, + 0x39ab8a, + 0x39c6ca, + 0x3b06cb, + 0x3b204b, + 0x3b2a0a, + 0x3b390b, + 0x3b904b, + 0x3c554b, + 0x3ae7fd48, + 0x3b287989, + 0x3b69d989, + 0x3bad8988, + 0x34c805, + 0x200583, + 0x22a3c4, + 0x217c05, + 0x267ac6, + 0x26cfc5, + 0x286284, + 0x344d48, + 0x30b505, + 0x290604, + 0x2064c7, + 0x29cf0a, + 0x266b4a, + 0x2dbb87, + 0x20c4c7, + 0x2fd2c7, + 0x282187, + 0x2f8c45, + 0x3b6e46, + 0x386007, + 0x244e44, + 0x2df546, + 0x2df446, + 0x204745, + 0x3389c4, + 0x2975c6, + 0x29bfc7, + 0x22df06, + 0x27c8c7, + 0x250803, + 0x3912c6, + 0x234f05, + 0x27d747, + 0x26a84a, + 0x26e7c4, + 0x21bd88, + 0x2b8a49, + 0x2e0d07, + 0x319c46, + 0x258788, + 0x2ef589, + 0x30f084, + 0x33a484, + 0x29ef05, + 0x2ba648, + 0x2c2807, + 0x2b3e49, + 0x22dc08, + 0x2f2306, + 0x310806, + 0x297f88, + 0x362bc6, + 0x28e4c5, + 0x2816c6, + 0x278988, + 0x237986, + 0x25af0b, + 0x2c7c06, + 0x299b0d, + 0x369405, + 0x2a8506, + 0x21f085, + 0x331b49, + 0x3a6cc7, + 0x318308, + 0x2a1e46, + 0x298d89, + 0x33ffc6, + 0x26a7c5, + 0x24c486, + 0x288b86, + 0x2c6e49, + 0x31e2c6, + 0x29cc07, + 0x245e85, + 0x203983, + 0x25b085, + 0x299dc7, + 0x3ab746, + 0x369309, + 0x3b3086, + 0x26b146, + 0x213fc9, + 0x2810c9, + 0x29fac7, + 0x200908, + 0x2b2f49, + 0x27ec48, + 0x330a46, + 0x2d1d85, + 0x240c8a, + 0x26b1c6, + 0x239346, + 0x2cac05, + 0x2d4888, + 0x22b287, + 0x233f0a, + 0x254f86, + 
0x251dc5, + 0x3324c6, + 0x224507, + 0x319b07, + 0x2835c5, + 0x26a985, + 0x395a06, + 0x3b8c06, + 0x2fa846, + 0x2bc7c4, + 0x280449, + 0x288806, + 0x2c814a, + 0x227248, + 0x36fd08, + 0x266b4a, + 0x212505, + 0x29bf05, + 0x2dd048, + 0x2c9688, + 0x233907, + 0x2ba946, + 0x32bf88, + 0x309507, + 0x27f348, + 0x2b5706, + 0x281e48, + 0x295586, + 0x278307, + 0x33a206, + 0x2975c6, + 0x22ecca, + 0x232046, + 0x2d1d89, + 0x2ee586, + 0x35c00a, + 0x3c2f49, + 0x27dd86, + 0x2b8304, + 0x21600d, + 0x287c07, + 0x239c06, + 0x2bf845, + 0x340045, + 0x37f306, + 0x2e15c9, + 0x2d4407, + 0x279406, + 0x306886, + 0x286309, + 0x2a3204, + 0x242544, + 0x3c2a88, + 0x24f046, + 0x271348, + 0x2e8008, + 0x29f447, + 0x3b6589, + 0x2faa47, + 0x2af9ca, + 0x2e79cf, + 0x31194a, + 0x3070c5, + 0x278bc5, + 0x218b05, + 0x35c647, + 0x2240c3, + 0x200b08, + 0x21e646, + 0x21e749, + 0x2d8646, + 0x2c8547, + 0x298b49, + 0x318208, + 0x2cacc7, + 0x30eb43, + 0x34c885, + 0x224045, + 0x2bc60b, + 0x327ec4, + 0x2d6884, + 0x276bc6, + 0x30f407, + 0x38f4ca, + 0x206247, + 0x20c347, + 0x27cb85, + 0x3c6485, + 0x282609, + 0x2975c6, + 0x2060cd, + 0x31e505, + 0x2b18c3, + 0x20b003, + 0x3a4d45, + 0x351305, + 0x258788, + 0x27a347, + 0x2422c6, + 0x29d606, + 0x22de05, + 0x237847, + 0x3c1d47, + 0x23c6c7, + 0x37c5ca, + 0x391388, + 0x2bc7c4, + 0x257bc7, + 0x27bb07, + 0x33f086, + 0x2692c7, + 0x2a1808, + 0x395f08, + 0x329b06, + 0x20c708, + 0x2cfbc4, + 0x386006, + 0x370d86, + 0x36bd46, + 0x277806, + 0x29b244, + 0x282246, + 0x2be246, + 0x297986, + 0x2060c6, + 0x20aec6, + 0x2a1646, + 0x2421c8, + 0x385a88, + 0x2cdac8, + 0x26d1c8, + 0x2dcfc6, + 0x212305, + 0x39e746, + 0x2ac285, + 0x396f87, + 0x22dcc5, + 0x213c03, + 0x38e045, + 0x33dd04, + 0x20b005, + 0x247643, + 0x33c4c7, + 0x30d708, + 0x27c986, + 0x2c930d, + 0x278b86, + 0x296f45, + 0x222083, + 0x2bbcc9, + 0x2a3386, + 0x291706, + 0x271e04, + 0x3118c7, + 0x23a1c6, + 0x2d46c5, + 0x21af83, + 0x3be4c4, + 0x27bcc6, + 0x3b6f44, + 0x370e88, + 0x3459c9, + 0x2317c9, + 0x29ed0a, + 0x2a05cd, + 0x2118c7, + 0x2391c6, + 0x20f984, + 0x2867c9, + 0x284ac8, + 0x287806, + 0x241906, + 0x2692c7, + 0x2d9346, + 0x22a046, + 0x347086, + 0x23ed4a, + 0x221cc8, + 0x22f885, + 0x2a2fc9, + 0x27f84a, + 0x2ff648, + 0x29b6c8, + 0x291688, + 0x29d24c, + 0x3124c5, + 0x29d888, + 0x385d86, + 0x24c9c6, + 0x35eb07, + 0x206145, + 0x281845, + 0x231689, + 0x2139c7, + 0x21e705, + 0x22aec7, + 0x20b003, + 0x2c2d45, + 0x2151c8, + 0x280d47, + 0x29b589, + 0x2e9f85, + 0x33e384, + 0x2a0288, + 0x2f16c7, + 0x2cae88, + 0x3aac88, + 0x2e1dc5, + 0x21e546, + 0x29d706, + 0x3a7009, + 0x2cb3c7, + 0x2ac8c6, + 0x206e87, + 0x239fc3, + 0x267d84, + 0x2cfcc5, + 0x2f3f84, + 0x246804, + 0x27ffc7, + 0x340d87, + 0x26dc84, + 0x29b3d0, + 0x31d507, + 0x3c6485, + 0x2561cc, + 0x224a04, + 0x2c4c88, + 0x278209, + 0x375886, + 0x240088, + 0x21ca84, + 0x276ec8, + 0x234506, + 0x22eb48, + 0x29a086, + 0x28854b, + 0x38ddc5, + 0x2cfb48, + 0x2173c4, + 0x345e0a, + 0x29b589, + 0x33a106, + 0x218bc8, + 0x25ed85, + 0x31dec4, + 0x2c4b86, + 0x23c588, + 0x27fd48, + 0x32c806, + 0x3c2344, + 0x240c06, + 0x2faac7, + 0x277d87, + 0x2692cf, + 0x205847, + 0x27de47, + 0x351685, + 0x35e345, + 0x29f789, + 0x382e46, + 0x27d885, + 0x2813c7, + 0x3934c8, + 0x2c7645, + 0x33a206, + 0x227088, + 0x2eee4a, + 0x3bf088, + 0x28ab07, + 0x2e7e06, + 0x2a2f86, + 0x202583, + 0x20de03, + 0x27fa09, + 0x2b2dc9, + 0x2c4a86, + 0x2e9f85, + 0x36bac8, + 0x218bc8, + 0x362d48, + 0x34710b, + 0x2c9547, + 0x309149, + 0x269548, + 0x350284, + 0x318648, + 0x28c889, + 0x2acbc5, + 0x35c547, + 0x267e05, + 0x27fc48, + 0x28eb4b, + 0x295d90, + 0x2a8145, + 0x21730c, + 
0x242485, + 0x27cc03, + 0x2b1d06, + 0x2bd884, + 0x2404c6, + 0x29bfc7, + 0x227104, + 0x248688, + 0x2009cd, + 0x2dfc05, + 0x211904, + 0x28f244, + 0x28f249, + 0x2ae548, + 0x31bc47, + 0x234588, + 0x280508, + 0x279705, + 0x21f2c7, + 0x279687, + 0x23ac87, + 0x26a989, + 0x346dc9, + 0x272146, + 0x383b46, + 0x269506, + 0x33b6c5, + 0x3aa4c4, + 0x3bd646, + 0x3c4c46, + 0x279748, + 0x2241cb, + 0x26e687, + 0x20f984, + 0x23a106, + 0x2a1b47, + 0x335405, + 0x3583c5, + 0x223884, + 0x346d46, + 0x3bd6c8, + 0x2867c9, + 0x2091c6, + 0x2848c8, + 0x2d4786, + 0x350908, + 0x2ce58c, + 0x2795c6, + 0x296c0d, + 0x29708b, + 0x29ccc5, + 0x3c1e87, + 0x31e3c6, + 0x3199c8, + 0x2721c9, + 0x329dc8, + 0x3c6485, + 0x208947, + 0x27ed48, + 0x24ff89, + 0x2a5586, + 0x24da8a, + 0x319748, + 0x329c0b, + 0x2ccd8c, + 0x276fc8, + 0x27b286, + 0x21dfc8, + 0x2eeac7, + 0x205989, + 0x2f084d, + 0x2974c6, + 0x31dd48, + 0x385949, + 0x2bc8c8, + 0x281f48, + 0x2bec8c, + 0x2bff87, + 0x2c0a47, + 0x26a7c5, + 0x2b4807, + 0x393388, + 0x2c4c06, + 0x20904c, + 0x2ec1c8, + 0x2c8c48, + 0x250286, + 0x223dc7, + 0x272344, + 0x26d1c8, + 0x2b6d0c, + 0x28430c, + 0x307145, + 0x2047c7, + 0x3c22c6, + 0x223d46, + 0x331d08, + 0x367784, + 0x22df0b, + 0x2b744b, + 0x2e7e06, + 0x200847, + 0x322385, + 0x271285, + 0x22e046, + 0x25ed45, + 0x327e85, + 0x2c6c87, + 0x270a09, + 0x3b8dc4, + 0x25f405, + 0x2de045, + 0x2add08, + 0x2da405, + 0x287109, + 0x2c9ac7, + 0x2c9acb, + 0x24fbc6, + 0x241f09, + 0x338908, + 0x291f85, + 0x23ad88, + 0x346e08, + 0x2570c7, + 0x208e47, + 0x280049, + 0x22ea87, + 0x2aa389, + 0x2b7dcc, + 0x394848, + 0x2d4d09, + 0x2d6447, + 0x2805c9, + 0x340ec7, + 0x2cce88, + 0x3b6745, + 0x385f86, + 0x2bf888, + 0x30d3c8, + 0x27f709, + 0x327ec7, + 0x256d85, + 0x2301c9, + 0x201c46, + 0x28e304, + 0x326006, + 0x311288, + 0x328747, + 0x2243c8, + 0x20c7c9, + 0x325b87, + 0x29d0c6, + 0x3c1f44, + 0x38e0c9, + 0x21f148, + 0x250147, + 0x2adf86, + 0x224106, + 0x2392c4, + 0x26d846, + 0x20af83, + 0x38d949, + 0x38dd86, + 0x20ca45, + 0x29d606, + 0x2c7205, + 0x27f1c8, + 0x2ee987, + 0x2eb146, + 0x3262c6, + 0x36fd08, + 0x29f907, + 0x297505, + 0x29b1c8, + 0x3b2448, + 0x319748, + 0x242345, + 0x386006, + 0x231589, + 0x3a6e84, + 0x2c708b, + 0x229d4b, + 0x22f789, + 0x20b003, + 0x25cf45, + 0x22abc6, + 0x242cc8, + 0x34e904, + 0x27c986, + 0x37c709, + 0x2f0405, + 0x2c6bc6, + 0x2f16c6, + 0x20c984, + 0x2a86ca, + 0x20c988, + 0x30d3c6, + 0x2934c5, + 0x331287, + 0x351547, + 0x21e544, + 0x229f87, + 0x22dc84, + 0x22dc86, + 0x200b43, + 0x26a985, + 0x37dc85, + 0x364648, + 0x257d85, + 0x279309, + 0x26d007, + 0x26d00b, + 0x2a240c, + 0x2a2a0a, + 0x2f0707, + 0x205c83, + 0x2ebcc8, + 0x242505, + 0x2c76c5, + 0x34c944, + 0x2ccd86, + 0x278206, + 0x26d887, + 0x23f8cb, + 0x29b244, + 0x2d7404, + 0x2c2784, + 0x2c6986, + 0x227104, + 0x2ba748, + 0x34c745, + 0x258a45, + 0x362c87, + 0x3c1f89, + 0x351305, + 0x37f30a, + 0x245d89, + 0x2d698a, + 0x23ee89, + 0x3a5104, + 0x306945, + 0x2d9448, + 0x2e184b, + 0x29ef05, + 0x2f3206, + 0x247684, + 0x279846, + 0x325a09, + 0x2a1c47, + 0x3b3248, + 0x2a0946, + 0x2faa47, + 0x27fd48, + 0x37f886, + 0x334f84, + 0x371c87, + 0x361205, + 0x373507, + 0x21c984, + 0x31e346, + 0x2e5bc8, + 0x297248, + 0x2e44c7, + 0x24e388, + 0x295645, + 0x20ae44, + 0x266a48, + 0x24e484, + 0x208e45, + 0x2f8e44, + 0x309607, + 0x2888c7, + 0x280708, + 0x2cb006, + 0x257d05, + 0x279108, + 0x3bf288, + 0x29ec49, + 0x22a046, + 0x233f88, + 0x345c8a, + 0x335488, + 0x2def85, + 0x225446, + 0x245c48, + 0x208a0a, + 0x229207, + 0x285dc5, + 0x28e508, + 0x2cc2c4, + 0x2d4906, + 0x2c0dc8, + 0x20aec6, + 0x31fc48, + 0x25b247, + 0x2063c6, + 
0x2b8304, + 0x2a6b87, + 0x2b0d44, + 0x3259c7, + 0x2a524d, + 0x22f805, + 0x2e13cb, + 0x284586, + 0x258608, + 0x248644, + 0x2ee246, + 0x27bcc6, + 0x21e307, + 0x2968cd, + 0x24b947, + 0x2b1808, + 0x286985, + 0x364e48, + 0x2c2786, + 0x2956c8, + 0x354486, + 0x336b47, + 0x2c5689, + 0x353e07, + 0x287ac8, + 0x2733c5, + 0x21c508, + 0x223c85, + 0x2f7505, + 0x23f105, + 0x24c4c3, + 0x277884, + 0x28e705, + 0x36d849, + 0x36b906, + 0x2a1908, + 0x208c05, + 0x2b46c7, + 0x29f14a, + 0x2c6b09, + 0x288a8a, + 0x2cdb48, + 0x22ad0c, + 0x28144d, + 0x34a703, + 0x31fb48, + 0x3be485, + 0x2eec06, + 0x318086, + 0x2deac5, + 0x206f89, + 0x3ab885, + 0x279108, + 0x25e046, + 0x3532c6, + 0x2a0149, + 0x3a0f87, + 0x28ee06, + 0x29f0c8, + 0x36bc48, + 0x2d8b87, + 0x2be3ce, + 0x2c29c5, + 0x24fe85, + 0x20adc8, + 0x3269c7, + 0x208f82, + 0x2be804, + 0x2403ca, + 0x250208, + 0x346f46, + 0x298c88, + 0x29d706, + 0x31da88, + 0x2ac8c8, + 0x2f74c4, + 0x2b4a85, + 0x6010c4, + 0x6010c4, + 0x6010c4, + 0x200a43, + 0x223f86, + 0x2795c6, + 0x29c98c, + 0x201343, + 0x21c986, + 0x200b04, + 0x2a3308, + 0x37c545, + 0x2404c6, + 0x2bc408, + 0x2cef86, + 0x2eb0c6, + 0x339f08, + 0x2cfd47, + 0x22e849, + 0x2a714a, + 0x211644, + 0x22dcc5, + 0x2b3e05, + 0x2c5406, + 0x211906, + 0x29c706, + 0x2f8686, + 0x22e984, + 0x22e98b, + 0x22d744, + 0x242085, + 0x2ab5c5, + 0x29f506, + 0x369808, + 0x281307, + 0x38dd04, + 0x2076c3, + 0x2cbdc5, + 0x22dac7, + 0x28120b, + 0x364547, + 0x2bc308, + 0x2b4bc7, + 0x26be06, + 0x251c48, + 0x26f24b, + 0x217b46, + 0x216b09, + 0x26f3c5, + 0x30eb43, + 0x2c6bc6, + 0x25b148, + 0x20c843, + 0x22dbc3, + 0x27fd46, + 0x29d706, + 0x36808a, + 0x27b2c5, + 0x27bb0b, + 0x29d54b, + 0x247b03, + 0x220043, + 0x2af944, + 0x2a88c7, + 0x25b1c4, + 0x240084, + 0x385c04, + 0x335788, + 0x293408, + 0x20dd89, + 0x2c5248, + 0x23f387, + 0x2060c6, + 0x2a154f, + 0x2c2b06, + 0x2cd084, + 0x29324a, + 0x22d9c7, + 0x2b0e46, + 0x28e349, + 0x20dd05, + 0x364785, + 0x20de46, + 0x21c643, + 0x2cc309, + 0x221e46, + 0x20c589, + 0x38f4c6, + 0x26a985, + 0x307545, + 0x205843, + 0x2a8a08, + 0x31be07, + 0x21e644, + 0x2a3188, + 0x24c744, + 0x39a286, + 0x2b1d06, + 0x2445c6, + 0x2cfa09, + 0x2c7645, + 0x2975c6, + 0x21afc9, + 0x393086, + 0x2a1646, + 0x395846, + 0x203a45, + 0x2f8e46, + 0x336b44, + 0x3b6745, + 0x2bf884, + 0x2b2206, + 0x31e4c4, + 0x200d03, + 0x284b85, + 0x238888, + 0x2509c7, + 0x34e989, + 0x285cc8, + 0x297d51, + 0x2f174a, + 0x2e7d47, + 0x396246, + 0x200b04, + 0x2bf988, + 0x26d9c8, + 0x297f0a, + 0x286ecd, + 0x24c486, + 0x33a006, + 0x2a6c46, + 0x283447, + 0x2b18c5, + 0x341987, + 0x2009c5, + 0x2c9c04, + 0x2a7586, + 0x26d6c7, + 0x2cc00d, + 0x245b87, + 0x344c48, + 0x279409, + 0x225346, + 0x2a5505, + 0x2fa084, + 0x311386, + 0x21e446, + 0x250386, + 0x299508, + 0x21d683, + 0x208d83, + 0x341f45, + 0x257e46, + 0x2ac885, + 0x2a0b48, + 0x29c18a, + 0x39e284, + 0x2a3308, + 0x291688, + 0x29f347, + 0x208cc9, + 0x2bc008, + 0x286847, + 0x385e86, + 0x20aeca, + 0x311408, + 0x3a6b09, + 0x2ae608, + 0x228089, + 0x396107, + 0x2fea85, + 0x347306, + 0x2c4a88, + 0x27a888, + 0x28de08, + 0x38ab08, + 0x242085, + 0x203bc4, + 0x236ec8, + 0x209784, + 0x23ec84, + 0x26a985, + 0x290647, + 0x3c1d49, + 0x21e107, + 0x214045, + 0x276dc6, + 0x35bc86, + 0x211a84, + 0x2a0486, + 0x257b44, + 0x2a11c6, + 0x3c1b06, + 0x2181c6, + 0x3c6485, + 0x2a0a07, + 0x205c83, + 0x216e89, + 0x36fb08, + 0x2866c4, + 0x2866cd, + 0x297348, + 0x2ddc88, + 0x3a6a86, + 0x2c5789, + 0x2c6b09, + 0x325705, + 0x29c28a, + 0x252a0a, + 0x25e6cc, + 0x25e846, + 0x277c06, + 0x2c2c86, + 0x372b09, + 0x2eee46, + 0x29f946, + 0x3ab946, + 0x26d1c8, + 0x24e386, + 
0x2cca0b, + 0x2907c5, + 0x258a45, + 0x277e85, + 0x3c2806, + 0x20ae83, + 0x244546, + 0x245b07, + 0x2bf845, + 0x3108c5, + 0x340045, + 0x2f83c6, + 0x3257c4, + 0x327886, + 0x2bad89, + 0x3c268c, + 0x2c9948, + 0x23c504, + 0x2f8b46, + 0x284686, + 0x25b148, + 0x218bc8, + 0x3c2589, + 0x331287, + 0x24ed89, + 0x37ba46, + 0x230604, + 0x20d804, + 0x280344, + 0x27fd48, + 0x3c1b8a, + 0x351286, + 0x35e207, + 0x36e207, + 0x242005, + 0x2b3dc4, + 0x28c846, + 0x2b1906, + 0x23a283, + 0x36f947, + 0x3aab88, + 0x32584a, + 0x22ca48, + 0x3643c8, + 0x31e505, + 0x29cdc5, + 0x26e785, + 0x2423c6, + 0x243286, + 0x340cc5, + 0x38db89, + 0x2b3bcc, + 0x26e847, + 0x297f88, + 0x2dee05, + 0x6010c4, + 0x24d184, + 0x280e84, + 0x21b846, + 0x29e4ce, + 0x364807, + 0x283645, + 0x3a6e0c, + 0x2f8f47, + 0x26d647, + 0x2f4449, + 0x21be49, + 0x285dc5, + 0x36fb08, + 0x231589, + 0x319605, + 0x2bf788, + 0x221fc6, + 0x266cc6, + 0x3c2f44, + 0x28b688, + 0x225503, + 0x3875c4, + 0x2cbe45, + 0x388307, + 0x228785, + 0x345b49, + 0x2ab04d, + 0x2b0486, + 0x207704, + 0x2ba8c8, + 0x27084a, + 0x228b87, + 0x31ce85, + 0x23b3c3, + 0x29d70e, + 0x2a8b0c, + 0x2ff747, + 0x29e687, + 0x217b83, + 0x2eee85, + 0x280e85, + 0x299048, + 0x2963c9, + 0x23c406, + 0x25b1c4, + 0x2e7c86, + 0x23390b, + 0x38320c, + 0x33a8c7, + 0x2cccc5, + 0x3b2348, + 0x2d8945, + 0x293247, + 0x2f1587, + 0x245945, + 0x20ae83, + 0x335ac4, + 0x22a385, + 0x3b8cc5, + 0x3b8cc6, + 0x2b5308, + 0x26d6c7, + 0x318386, + 0x205c06, + 0x23f046, + 0x27e509, + 0x21f3c7, + 0x250646, + 0x383386, + 0x275d06, + 0x2a8605, + 0x3c53c6, + 0x3746c5, + 0x2da488, + 0x28ff4b, + 0x28c586, + 0x36e244, + 0x2e0409, + 0x26d004, + 0x221f48, + 0x326107, + 0x281e44, + 0x2bb308, + 0x2c0844, + 0x2a8644, + 0x286605, + 0x2dfc46, + 0x3356c7, + 0x27f283, + 0x29d185, + 0x32ce84, + 0x24fec6, + 0x325788, + 0x2b6c05, + 0x28fc09, + 0x2303c5, + 0x21c988, + 0x2312c7, + 0x38de88, + 0x2ba487, + 0x27df09, + 0x2820c6, + 0x305706, + 0x2b3084, + 0x2d7345, + 0x300a4c, + 0x277e87, + 0x278a87, + 0x36e0c8, + 0x2b0486, + 0x271484, + 0x30a244, + 0x27fec9, + 0x2c2d86, + 0x282687, + 0x277784, + 0x24d786, + 0x317bc5, + 0x2cab47, + 0x2cc986, + 0x24d949, + 0x383047, + 0x2692c7, + 0x29ffc6, + 0x24d6c5, + 0x27d1c8, + 0x221cc8, + 0x348546, + 0x2b6c45, + 0x349e86, + 0x206543, + 0x298ec9, + 0x29c48e, + 0x2ba1c8, + 0x24c848, + 0x34834b, + 0x28fe46, + 0x211584, + 0x281044, + 0x29c58a, + 0x217207, + 0x250705, + 0x216b09, + 0x2be305, + 0x23ecc7, + 0x24e304, + 0x2a9a47, + 0x2e7f08, + 0x2e0dc6, + 0x24c589, + 0x2bc10a, + 0x217186, + 0x296e86, + 0x2ab545, + 0x3898c5, + 0x347ac7, + 0x24cf08, + 0x317b08, + 0x2f74c6, + 0x3075c5, + 0x21168e, + 0x2bc7c4, + 0x298fc5, + 0x276749, + 0x382c48, + 0x28aa46, + 0x29accc, + 0x29bd90, + 0x29e10f, + 0x29f688, + 0x2f0707, + 0x3c6485, + 0x28e705, + 0x335549, + 0x28e709, + 0x240d06, + 0x29ef87, + 0x2d7245, + 0x34d589, + 0x33f106, + 0x2eec8d, + 0x280209, + 0x240084, + 0x2b9f48, + 0x236f89, + 0x351446, + 0x2ebec5, + 0x305706, + 0x3b3109, + 0x277608, + 0x212305, + 0x28b684, + 0x29ae8b, + 0x351305, + 0x242d46, + 0x281786, + 0x285206, + 0x28f64b, + 0x28fd09, + 0x205b45, + 0x396e87, + 0x2f16c6, + 0x240206, + 0x280c08, + 0x2dfd49, + 0x344a0c, + 0x22d8c8, + 0x308ec6, + 0x32c803, + 0x32a506, + 0x27ddc5, + 0x27be48, + 0x306fc6, + 0x2cad88, + 0x2062c5, + 0x27a585, + 0x365288, + 0x31dc07, + 0x317fc7, + 0x26d887, + 0x240088, + 0x2c5508, + 0x2b1206, + 0x2b2047, + 0x267c47, + 0x28f34a, + 0x256c83, + 0x3c2806, + 0x23c645, + 0x2403c4, + 0x279409, + 0x27de84, + 0x250a44, + 0x29a104, + 0x29e68b, + 0x31bd47, + 0x2118c5, + 0x295348, + 0x276dc6, + 0x276dc8, + 
0x27b206, + 0x28b5c5, + 0x28b885, + 0x28d446, + 0x28dbc8, + 0x28e288, + 0x2795c6, + 0x29518f, + 0x298990, + 0x369405, + 0x205c83, + 0x2306c5, + 0x309088, + 0x28e609, + 0x319748, + 0x24c408, + 0x238d88, + 0x31be07, + 0x276a89, + 0x2caf88, + 0x28dac4, + 0x299f88, + 0x2addc9, + 0x2b38c7, + 0x299f04, + 0x21e1c8, + 0x2a07ca, + 0x2aff86, + 0x24c486, + 0x229f09, + 0x29bfc7, + 0x2c83c8, + 0x345608, + 0x294048, + 0x25d345, + 0x38a705, + 0x258a45, + 0x280e45, + 0x37ffc7, + 0x20ae85, + 0x2bf845, + 0x206d86, + 0x319687, + 0x2e1787, + 0x2a0ac6, + 0x2ce085, + 0x242d46, + 0x24c685, + 0x2d70c8, + 0x2ff5c4, + 0x393106, + 0x334e84, + 0x31dec8, + 0x22f10a, + 0x27a34c, + 0x23fac5, + 0x283506, + 0x344bc6, + 0x341e06, + 0x308f44, + 0x317e85, + 0x27b047, + 0x29c049, + 0x2c6f47, + 0x6010c4, + 0x6010c4, + 0x31bbc5, + 0x2cb984, + 0x29a68a, + 0x276c46, + 0x251e84, + 0x204745, + 0x36c3c5, + 0x2b1804, + 0x2813c7, + 0x230347, + 0x2c6988, + 0x31fec8, + 0x212309, + 0x26eec8, + 0x29a84b, + 0x2b7fc4, + 0x37b985, + 0x27d905, + 0x26d809, + 0x2dfd49, + 0x2e0308, + 0x22d748, + 0x29f504, + 0x2846c5, + 0x200583, + 0x2c53c5, + 0x297646, + 0x29620c, + 0x21f046, + 0x2ebdc6, + 0x28acc5, + 0x2f8448, + 0x35ec46, + 0x3963c6, + 0x24c486, + 0x22c7cc, + 0x250544, + 0x23f18a, + 0x28ac08, + 0x296047, + 0x32cd86, + 0x23c4c7, + 0x2e7885, + 0x2adf86, + 0x35aa46, + 0x366207, + 0x250a84, + 0x309705, + 0x276744, + 0x2c9c87, + 0x276988, + 0x277a8a, + 0x27ebc7, + 0x2a8207, + 0x2f0687, + 0x2d8a89, + 0x29620a, + 0x22e943, + 0x250985, + 0x218203, + 0x385c49, + 0x336dc8, + 0x351687, + 0x319849, + 0x221dc6, + 0x3b6808, + 0x33c445, + 0x3bf38a, + 0x200c89, + 0x3299c9, + 0x35eb07, + 0x26dac9, + 0x2180c8, + 0x3663c6, + 0x2836c8, + 0x203a47, + 0x22ea87, + 0x245d87, + 0x2e5a48, + 0x2f89c6, + 0x2a0585, + 0x27b047, + 0x296988, + 0x334e04, + 0x2c8004, + 0x28ed07, + 0x2acc47, + 0x23140a, + 0x366346, + 0x364c4a, + 0x2be747, + 0x2bc587, + 0x3097c4, + 0x2aa444, + 0x2caa46, + 0x23a444, + 0x23a44c, + 0x39ee05, + 0x218a09, + 0x337284, + 0x2b18c5, + 0x2707c8, + 0x239dc5, + 0x37f306, + 0x2311c4, + 0x2d02ca, + 0x2cb2c6, + 0x29180a, + 0x2162c7, + 0x224505, + 0x21c645, + 0x24204a, + 0x28dd45, + 0x29ed06, + 0x209784, + 0x2afac6, + 0x347b85, + 0x307086, + 0x2e44cc, + 0x218d4a, + 0x252b04, + 0x2060c6, + 0x29bfc7, + 0x2cc904, + 0x26d1c8, + 0x2f3106, + 0x211509, + 0x2db609, + 0x394949, + 0x2c7246, + 0x203b46, + 0x283807, + 0x38dac8, + 0x203949, + 0x31bd47, + 0x2954c6, + 0x2faac7, + 0x2a6b05, + 0x2bc7c4, + 0x2833c7, + 0x267e05, + 0x286545, + 0x31f747, + 0x245808, + 0x3b22c6, + 0x2977cd, + 0x29924f, + 0x29d54d, + 0x214084, + 0x238986, + 0x2d0688, + 0x3ab905, + 0x28f508, + 0x256f8a, + 0x240084, + 0x233b46, + 0x2cd107, + 0x2ca387, + 0x2cfe09, + 0x283685, + 0x2b1804, + 0x2b49ca, + 0x2bbbc9, + 0x26dbc7, + 0x297a86, + 0x351446, + 0x284606, + 0x371d46, + 0x2cf6cf, + 0x2d0549, + 0x24e386, + 0x354846, + 0x31ac09, + 0x2b2147, + 0x20be43, + 0x22c946, + 0x20de03, + 0x2de988, + 0x2fa907, + 0x29f889, + 0x2b1b88, + 0x318108, + 0x328006, + 0x21ef89, + 0x399b05, + 0x2b2204, + 0x2e8187, + 0x372b85, + 0x214084, + 0x211988, + 0x2174c4, + 0x2b1e87, + 0x30d686, + 0x395ac5, + 0x2ae608, + 0x35130b, + 0x2b4107, + 0x2422c6, + 0x2c2b84, + 0x2b6286, + 0x26a985, + 0x267e05, + 0x27cf49, + 0x280fc9, + 0x22eac4, + 0x22eb05, + 0x206105, + 0x3bf206, + 0x36fc08, + 0x2bdc86, + 0x3aa9cb, + 0x37570a, + 0x2ba585, + 0x28b906, + 0x39df85, + 0x345405, + 0x29b847, + 0x3c2a88, + 0x24ed84, + 0x39ce86, + 0x28e306, + 0x218287, + 0x30eb04, + 0x27bcc6, + 0x35c745, + 0x35c749, + 0x203d44, + 0x2b3f49, + 0x2795c6, + 0x2c0048, + 
0x206105, + 0x36e305, + 0x307086, + 0x344909, + 0x21be49, + 0x2ebe46, + 0x382d48, + 0x2ab188, + 0x39df44, + 0x2b5504, + 0x2b5508, + 0x239d08, + 0x24ee89, + 0x2975c6, + 0x24c486, + 0x32be4d, + 0x27c986, + 0x2ce449, + 0x39e845, + 0x20de46, + 0x2941c8, + 0x3277c5, + 0x267c84, + 0x26a985, + 0x280908, + 0x29a449, + 0x276804, + 0x31e346, + 0x251f0a, + 0x2ff648, + 0x231589, + 0x25890a, + 0x3197c6, + 0x299408, + 0x293005, + 0x28ae88, + 0x2e7905, + 0x221c89, + 0x376189, + 0x23c442, + 0x26f3c5, + 0x2ebf86, + 0x279507, + 0x38c8c5, + 0x30d2c6, + 0x304c08, + 0x2b0486, + 0x2d9309, + 0x278b86, + 0x280a88, + 0x2a9605, + 0x382106, + 0x336c48, + 0x27fd48, + 0x396008, + 0x2f2388, + 0x3c53c4, + 0x21e583, + 0x2d9544, + 0x27edc6, + 0x2a6b44, + 0x24c787, + 0x3962c9, + 0x3c0485, + 0x345606, + 0x22c946, + 0x2b514b, + 0x2b0d86, + 0x293b06, + 0x393208, + 0x310806, + 0x224303, + 0x3c4e83, + 0x2bc7c4, + 0x233e85, + 0x2d45c7, + 0x276988, + 0x27698f, + 0x27af4b, + 0x36fa08, + 0x31e3c6, + 0x36fd0e, + 0x242483, + 0x2d4544, + 0x2b0d05, + 0x2b1686, + 0x28c94b, + 0x290706, + 0x227109, + 0x395ac5, + 0x2eccc8, + 0x20e688, + 0x21bd0c, + 0x29e6c6, + 0x2c5406, + 0x2e9f85, + 0x287888, + 0x27a345, + 0x350288, + 0x29b04a, + 0x29d989, + 0x6010c4, + 0x200742, + 0x3c202c42, + 0x202542, + 0x26ff84, + 0x201e42, + 0x21a484, + 0x2032c2, + 0x200342, + 0x207c02, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x20ec83, + 0x241d03, + 0x210143, + 0x28b304, + 0x20be03, + 0x23d744, + 0x237583, + 0x2d2484, + 0x30e843, + 0x38cb87, + 0x21f743, + 0x20ae43, + 0x310f08, + 0x241d03, + 0x2a47cb, + 0x2e8943, + 0x3a25c6, + 0x20e982, + 0x3987cb, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x241d03, + 0x2098c3, + 0x204543, + 0x200742, + 0xcd588, + 0x3574c5, + 0x267e88, + 0x2e2e08, + 0x202c42, + 0x3325c5, + 0x331707, + 0x201b02, + 0x248887, + 0x202542, + 0x256807, + 0x3c0b89, + 0x292bc8, + 0x293ec9, + 0x247342, + 0x269ec7, + 0x329844, + 0x3317c7, + 0x375607, + 0x25fe82, + 0x21f743, + 0x20d682, + 0x2032c2, + 0x200342, + 0x20b182, + 0x200382, + 0x207c02, + 0x2a9105, + 0x24d0c5, + 0x2c42, + 0x37583, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x10c43, + 0x781, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x21a003, + 0x3f0eca46, + 0x6f803, + 0x7f685, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x9482, + 0xcd588, + 0xae43, + 0xaff03, + 0x4cec4, + 0xd8d45, + 0x200742, + 0x3a4c04, + 0x20be03, + 0x237583, + 0x30e843, + 0x3a2f03, + 0x232585, + 0x214bc3, + 0x20f003, + 0x20ec83, + 0x228803, + 0x241d03, + 0x207c03, + 0x2605c3, + 0x203f83, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x202c42, + 0x241d03, + 0xcd588, + 0x30e843, + 0xaff03, + 0xcd588, + 0xaff03, + 0x2b8283, + 0x20be03, + 0x234784, + 0x237583, + 0x30e843, + 0x207d02, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x207d02, + 0x20be83, + 0x20ec83, + 0x241d03, + 0x2e2d83, + 0x207c03, + 0x200742, + 0x202c42, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x3a25c5, + 0x9a2c6, + 0x28b304, + 0x20e982, + 0xcd588, + 0x200742, + 0x20288, + 0x132983, + 0x202c42, + 0x43490186, + 0x12b44, + 0x10bfcb, + 0x41546, + 0x1f847, + 0x237583, + 0x52748, + 0x30e843, + 0xef4c5, + 0xe84, + 0x222003, + 0x56c47, + 0xd4344, 
+ 0x20ec83, + 0xafd44, + 0xaff03, + 0x241d03, + 0x2e9484, + 0xfdb48, + 0x157206, + 0x10d08, + 0x135fc5, + 0x126749, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ae43, + 0x241d03, + 0x2e8943, + 0x20e982, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff83, + 0x226444, + 0x20ec83, + 0xae43, + 0x241d03, + 0x20be03, + 0x237583, + 0x2d2484, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x3a25c6, + 0x237583, + 0x30e843, + 0x181c43, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x1f847, + 0xcd588, + 0x30e843, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x45e0be03, + 0x237583, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x200742, + 0x202c42, + 0x20be03, + 0x30e843, + 0x20ec83, + 0x200342, + 0x241d03, + 0x32e147, + 0x23b00b, + 0x205d03, + 0x2409c8, + 0x38d847, + 0x224906, + 0x2c1605, + 0x38e5c9, + 0x21f4c8, + 0x36b4c9, + 0x39b210, + 0x36b4cb, + 0x2f6809, + 0x207503, + 0x22bcc9, + 0x235b46, + 0x235b4c, + 0x357588, + 0x3c02c8, + 0x200289, + 0x2e244e, + 0x3c094b, + 0x34754c, + 0x205583, + 0x27b80c, + 0x205589, + 0x2fbf47, + 0x2374cc, + 0x2ad14a, + 0x253404, + 0x32a08d, + 0x27b6c8, + 0x21014d, + 0x28a7c6, + 0x28b30b, + 0x31d689, + 0x27a747, + 0x3c2c46, + 0x341409, + 0x32538a, + 0x313048, + 0x2e8544, + 0x332e87, + 0x249d47, + 0x277984, + 0x2d9a44, + 0x386dc9, + 0x364209, + 0x215cc8, + 0x2108c5, + 0x2ea8c5, + 0x20d106, + 0x329f49, + 0x25720d, + 0x2f3308, + 0x20d007, + 0x2c1688, + 0x23ce86, + 0x37ce44, + 0x286c45, + 0x203846, + 0x2043c4, + 0x205487, + 0x2081ca, + 0x213904, + 0x2170c6, + 0x217d49, + 0x217d4f, + 0x21870d, + 0x218fc6, + 0x21fe90, + 0x220286, + 0x220807, + 0x221447, + 0x22144f, + 0x222149, + 0x228d46, + 0x22a1c7, + 0x22a1c8, + 0x22b089, + 0x395b88, + 0x2de4c7, + 0x22cf43, + 0x3bcfc6, + 0x3bbb08, + 0x2e270a, + 0x387809, + 0x212743, + 0x331606, + 0x39ccca, + 0x2e4b47, + 0x2fbd8a, + 0x209a8e, + 0x222286, + 0x26f5c7, + 0x21bac6, + 0x205646, + 0x38a50b, + 0x34edca, + 0x309b0d, + 0x203c07, + 0x260648, + 0x260649, + 0x26064f, + 0x34eb0c, + 0x27c0c9, + 0x2d778e, + 0x38cc8a, + 0x293886, + 0x2f9f06, + 0x313ccc, + 0x315a8c, + 0x328308, + 0x353d07, + 0x26d545, + 0x290504, + 0x202c4e, + 0x266f84, + 0x3188c7, + 0x39270a, + 0x3a2a54, + 0x3b92cf, + 0x221608, + 0x3bce88, + 0x33984d, + 0x33984e, + 0x231a09, + 0x232b08, + 0x232b0f, + 0x2371cc, + 0x2371cf, + 0x2386c7, + 0x23dfca, + 0x22c54b, + 0x23f5c8, + 0x242707, + 0x2616cd, + 0x20a286, + 0x32a246, + 0x2443c9, + 0x2a6f88, + 0x249208, + 0x24920e, + 0x23b107, + 0x24b505, + 0x24cc85, + 0x204c44, + 0x224bc6, + 0x215bc8, + 0x322ec3, + 0x397e4e, + 0x261a88, + 0x2ae14b, + 0x301c07, + 0x2f7305, + 0x27b986, + 0x2aab07, + 0x2f4848, + 0x317909, + 0x20a505, + 0x284bc8, + 0x226e06, + 0x39caca, + 0x202b49, + 0x237589, + 0x23758b, + 0x3211c8, + 0x277849, + 0x210986, + 0x36db0a, + 0x2bf50a, + 0x23e1cc, + 0x21e907, + 0x2929ca, + 0x211d0b, + 0x211d19, + 0x30a988, + 0x3a2645, + 0x261886, + 0x26ba89, + 0x358706, + 0x2d340a, + 0x21f6c6, + 0x225784, + 0x2c380d, + 0x323307, + 0x225789, + 0x24e685, + 0x251548, + 0x252509, + 0x252944, + 0x253307, + 0x253308, + 0x253907, + 0x268688, + 0x257887, + 0x205dc5, + 0x25d60c, + 0x25de49, + 0x30590a, + 0x3a0e09, + 0x22bdc9, + 0x37924c, + 0x26004b, + 0x260dc8, + 0x261e88, + 0x265244, + 0x281b08, + 0x283c89, + 0x2ad207, + 0x217f86, + 0x31d907, + 0x3843c9, + 0x335c0b, + 0x2b6107, + 0x3c6847, + 0x216407, + 0x2100c4, + 0x2100c5, + 0x278845, + 0x34c04b, + 0x3ad084, + 0x320d08, + 0x28550a, + 0x226ec7, + 0x35b207, + 0x28c112, + 0x2a10c6, + 0x234106, + 0x2b658e, + 0x2a4a86, + 0x291508, + 
0x291a8f, + 0x210508, + 0x38b748, + 0x2bb78a, + 0x2bb791, + 0x2a0d4e, + 0x242a0a, + 0x242a0c, + 0x232d07, + 0x232d10, + 0x3c4cc8, + 0x2a0f45, + 0x2aae0a, + 0x20440c, + 0x29580d, + 0x2fc446, + 0x2fc447, + 0x2fc44c, + 0x30460c, + 0x214bcc, + 0x2ab88b, + 0x3706c4, + 0x211dc4, + 0x388489, + 0x30a2c7, + 0x23dd89, + 0x2bf349, + 0x2ace07, + 0x2acfc6, + 0x2acfc9, + 0x2ad3c3, + 0x2b058a, + 0x31b487, + 0x3491cb, + 0x30998a, + 0x3298c4, + 0x316946, + 0x27ee49, + 0x23a2c4, + 0x39eeca, + 0x2425c5, + 0x2bcbc5, + 0x2bcbcd, + 0x2bcf0e, + 0x2d9685, + 0x32d506, + 0x3a21c7, + 0x25d88a, + 0x37a506, + 0x2e1304, + 0x303707, + 0x246a4b, + 0x23cf47, + 0x3c1a04, + 0x390706, + 0x39070d, + 0x38408c, + 0x20eb46, + 0x2f350a, + 0x27c686, + 0x285788, + 0x354b47, + 0x23618a, + 0x24b386, + 0x203b03, + 0x294346, + 0x3bb988, + 0x38860a, + 0x2cb107, + 0x2cb108, + 0x30df84, + 0x28c687, + 0x201cc8, + 0x2a12c8, + 0x2827c8, + 0x2b130a, + 0x2d8345, + 0x20be87, + 0x242853, + 0x25a706, + 0x21b388, + 0x227609, + 0x248748, + 0x32808b, + 0x318488, + 0x246b84, + 0x365386, + 0x311e06, + 0x2dfa89, + 0x382807, + 0x25d708, + 0x29c806, + 0x31f644, + 0x341d05, + 0x2c7e48, + 0x34398a, + 0x2c3488, + 0x2c8906, + 0x29960a, + 0x3b8e48, + 0x2cc708, + 0x2cd2c8, + 0x2cdd46, + 0x2d0886, + 0x3a08cc, + 0x2d0e10, + 0x29ea45, + 0x210308, + 0x394490, + 0x210310, + 0x39b08e, + 0x3a054e, + 0x3a0554, + 0x3a624f, + 0x3a6606, + 0x321391, + 0x31eb13, + 0x31ef88, + 0x3ab285, + 0x240f08, + 0x387b05, + 0x2da18c, + 0x22cd09, + 0x290349, + 0x22d187, + 0x251309, + 0x24f147, + 0x2f8cc6, + 0x286a47, + 0x2034c5, + 0x210c83, + 0x323089, + 0x2278c9, + 0x381c43, + 0x38c7c4, + 0x326f0d, + 0x347c8f, + 0x31f685, + 0x3175c6, + 0x225a47, + 0x357307, + 0x2f5886, + 0x2f588b, + 0x2a2bc5, + 0x25f106, + 0x2f9d87, + 0x258249, + 0x375d06, + 0x322285, + 0x374e4b, + 0x3be7c6, + 0x229a85, + 0x27dc08, + 0x2b2bc8, + 0x2aec8c, + 0x2aec90, + 0x2ae949, + 0x2bd487, + 0x2d8e4b, + 0x306746, + 0x2de38a, + 0x2df80b, + 0x2e094a, + 0x2e0bc6, + 0x2e2c45, + 0x31b1c6, + 0x2b7048, + 0x22d24a, + 0x3394dc, + 0x2e8a0c, + 0x2e8d08, + 0x3a25c5, + 0x361f87, + 0x209946, + 0x270bc5, + 0x21a846, + 0x2f5a48, + 0x2bbe47, + 0x2e2348, + 0x25a7ca, + 0x225b4c, + 0x20d309, + 0x345787, + 0x222a04, + 0x24cd46, + 0x38b2ca, + 0x2bf445, + 0x2110cc, + 0x213588, + 0x373608, + 0x2238cc, + 0x2dd30c, + 0x329409, + 0x329647, + 0x24398c, + 0x22c044, + 0x2482ca, + 0x3033cc, + 0x27280b, + 0x372f0b, + 0x253786, + 0x258d87, + 0x232f47, + 0x232f4f, + 0x2fce11, + 0x2d5b52, + 0x2590cd, + 0x2590ce, + 0x25940e, + 0x3a6408, + 0x3a6412, + 0x25eb08, + 0x38d487, + 0x2556ca, + 0x355cc8, + 0x2a4a45, + 0x37fe0a, + 0x220607, + 0x316244, + 0x221b83, + 0x378205, + 0x2bba07, + 0x307c47, + 0x295a0e, + 0x399e8d, + 0x39b5c9, + 0x20fd85, + 0x3b00c3, + 0x20ab06, + 0x25f705, + 0x2ae388, + 0x2b9609, + 0x2618c5, + 0x2618cf, + 0x2e2a87, + 0x38e505, + 0x3025ca, + 0x2b1506, + 0x25b949, + 0x2f640c, + 0x2f80c9, + 0x3be506, + 0x28530c, + 0x32c906, + 0x2fb508, + 0x2fba86, + 0x30ab06, + 0x2b0f04, + 0x30d603, + 0x38668a, + 0x216711, + 0x27c28a, + 0x272085, + 0x282347, + 0x25ab47, + 0x201dc4, + 0x201dcb, + 0x293d48, + 0x2ba046, + 0x36e145, + 0x33d8c4, + 0x362109, + 0x202a84, + 0x249047, + 0x300585, + 0x300587, + 0x2b67c5, + 0x3482c3, + 0x38d348, + 0x317c4a, + 0x27f283, + 0x35750a, + 0x3b3486, + 0x26164f, + 0x3b8349, + 0x397dd0, + 0x2effc8, + 0x2c8d49, + 0x296707, + 0x39068f, + 0x319c04, + 0x2d2504, + 0x220106, + 0x34f846, + 0x2d6e8a, + 0x253c06, + 0x352987, + 0x303f48, + 0x304147, + 0x3049c7, + 0x305b8a, + 0x3083cb, + 0x341ac5, + 0x2d5788, + 0x256703, + 0x3b6b0c, + 
0x35d60f, + 0x26d34d, + 0x25e287, + 0x39b709, + 0x36de47, + 0x282c48, + 0x3a2c4c, + 0x2c8a48, + 0x2701c8, + 0x31c38e, + 0x333d94, + 0x3342a4, + 0x35308a, + 0x36becb, + 0x24f204, + 0x24f209, + 0x233bc8, + 0x24d305, + 0x3229ca, + 0x261cc7, + 0x31b0c4, + 0x2b6c03, + 0x20be03, + 0x23d744, + 0x237583, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x2d0e06, + 0x226444, + 0x20ec83, + 0x241d03, + 0x219543, + 0x200742, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x23d744, + 0x237583, + 0x30e843, + 0x214bc3, + 0x2d0e06, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x203d43, + 0x20ec83, + 0xaff03, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x226444, + 0x20ec83, + 0x241d03, + 0x200742, + 0x24be43, + 0x202c42, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x203cc2, + 0x24d5c2, + 0x202c42, + 0x20be03, + 0x20b2c2, + 0x201342, + 0x26ff84, + 0x21a484, + 0x227d42, + 0x226444, + 0x200342, + 0x241d03, + 0x219543, + 0x253786, + 0x230882, + 0x204182, + 0x228382, + 0x48610503, + 0x48a32d03, + 0x5b586, + 0x5b586, + 0x28b304, + 0x20ae43, + 0x15a4a, + 0x3ba4c, + 0x121ecc, + 0x7f48d, + 0x117485, + 0x2aa47, + 0x14ec6, + 0x19148, + 0x1c787, + 0x23288, + 0x1807ca, + 0x102c07, + 0x496d2fc5, + 0x133789, + 0x3c00b, + 0x18754b, + 0x1bdd08, + 0xf608a, + 0x8a34e, + 0x144854b, + 0x12b44, + 0x5f246, + 0x8808, + 0x7e948, + 0x3c2c7, + 0x910c7, + 0x78449, + 0x3a347, + 0x67008, + 0x100249, + 0x170bc4, + 0x191e05, + 0x12f34e, + 0xa964d, + 0x1f6c8, + 0x49b64046, + 0x4a564048, + 0x739c8, + 0x12e350, + 0x5978c, + 0x666c7, + 0x66e47, + 0x6abc7, + 0x704c7, + 0xd0c2, + 0x1807, + 0x14ef8c, + 0x11d107, + 0xa4686, + 0xa5a89, + 0xa7708, + 0x552c2, + 0x1342, + 0x3900b, + 0xafdc7, + 0x25589, + 0x52c49, + 0x142188, + 0xb0102, + 0x1970c9, + 0xc9f8a, + 0xc6209, + 0xd3909, + 0xd50c8, + 0xd6007, + 0xd82c9, + 0xda885, + 0xdae90, + 0x138ac6, + 0x14a345, + 0xeb78d, + 0x2bac6, + 0xe3d47, + 0xe9498, + 0x3a6c8, + 0x14640a, + 0x16f02, + 0x5f88d, + 0x1282, + 0x7dac6, + 0x8cc08, + 0x6e308, + 0xcd449, + 0x1be608, + 0x6f98e, + 0xab07, + 0xfe68d, + 0xf26c5, + 0x1588, + 0x1a1e08, + 0xfeec6, + 0xc842, + 0x157206, + 0xdc2, + 0x2c1, + 0x60a07, + 0x8b003, + 0x49ee9d04, + 0x4a294a43, + 0x101, + 0x13d06, + 0x101, + 0x301, + 0x13d06, + 0x8b003, + 0x140e3c5, + 0x253404, + 0x20be03, + 0x254a04, + 0x26ff84, + 0x20ec83, + 0x2274c5, + 0x21a003, + 0x24f3c3, + 0x2f5805, + 0x203f83, + 0x4b60be03, + 0x237583, + 0x30e843, + 0x200541, + 0x21f743, + 0x21a484, + 0x226444, + 0x20ec83, + 0x241d03, + 0x207c03, + 0xcd588, + 0x200742, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x237583, + 0x203d43, + 0x201342, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x20ec83, + 0x20ae43, + 0x241d03, + 0x203f83, + 0xcd588, + 0x35d382, + 0x1851c7, + 0x2c42, + 0x141c85, + 0x598cf, + 0x1455908, + 0x10430e, + 0x4c607402, + 0x31a848, + 0x307206, + 0x2c1146, + 0x306b87, + 0x4ca11a42, + 0x4cfb81c8, + 0x21ed8a, + 0x266148, + 0x200602, + 0x31b2c9, + 0x341b07, + 0x217f06, + 0x38d089, + 0x20bfc4, + 0x20e2c6, + 0x2f2904, + 0x270984, + 0x25cf89, + 0x3030c6, + 0x24d185, + 0x2faf45, + 0x2322c7, + 0x2be9c7, + 0x354984, + 0x306dc6, + 0x2ff205, + 0x309485, + 0x39dec5, + 0x2ea687, + 0x301a45, + 0x319149, + 0x336f85, + 0x2f4984, + 0x37a447, + 0x348a0e, + 0x3aaec9, + 0x2b6449, + 0x335246, + 0x2454c8, + 0x2ee34b, + 0x35bd8c, + 0x33b746, + 0x347407, + 0x2afbc5, + 0x2d9a4a, + 0x215dc9, + 0x366f09, + 0x332706, + 0x2f9b45, + 0x383105, + 0x338689, + 0x39e04b, + 0x275e86, + 0x343ec6, + 0x203104, + 0x28bdc6, + 0x24b588, + 0x3bb806, + 0x21d186, + 0x206888, + 0x209347, + 0x209509, 
+ 0x20acc5, + 0xcd588, + 0x291044, + 0x304f44, + 0x210f05, + 0x3a8c49, + 0x226087, + 0x22608b, + 0x2288ca, + 0x22cc45, + 0x4d207a42, + 0x309847, + 0x4d62cf48, + 0x370a47, + 0x383e85, + 0x32654a, + 0x2c42, + 0x2fac0b, + 0x2579ca, + 0x2277c6, + 0x210d83, + 0x2a4f8d, + 0x3aa74c, + 0x3bedcd, + 0x24e2c5, + 0x37dd45, + 0x322f07, + 0x20b2c9, + 0x21ec86, + 0x253a85, + 0x2ced88, + 0x28bcc3, + 0x2e3108, + 0x28bcc8, + 0x2c2107, + 0x30f108, + 0x3aa549, + 0x286d47, + 0x23ab87, + 0x224788, + 0x38ae04, + 0x38ae07, + 0x28a6c8, + 0x353706, + 0x3b544f, + 0x2293c7, + 0x2de646, + 0x329785, + 0x228503, + 0x391c87, + 0x378183, + 0x253e86, + 0x2553c6, + 0x255b06, + 0x28fa05, + 0x268683, + 0x396d48, + 0x379c89, + 0x38eb4b, + 0x255c88, + 0x257545, + 0x258b85, + 0x4db29982, + 0x286b09, + 0x38d707, + 0x25f185, + 0x25ce87, + 0x25e9c6, + 0x371c05, + 0x25f54b, + 0x260dc4, + 0x265d05, + 0x265e47, + 0x275806, + 0x275c45, + 0x281d07, + 0x2829c7, + 0x2e1704, + 0x28990a, + 0x289dc8, + 0x293089, + 0x241245, + 0x364946, + 0x24b74a, + 0x2fae46, + 0x268fc7, + 0x292d4d, + 0x2a2709, + 0x3954c5, + 0x2031c7, + 0x31f148, + 0x336a08, + 0x209ec7, + 0x3be1c6, + 0x21d4c7, + 0x255083, + 0x303044, + 0x36e785, + 0x39fc47, + 0x3a4609, + 0x230cc8, + 0x33dc85, + 0x2363c4, + 0x253d45, + 0x39038d, + 0x211742, + 0x2bd986, + 0x27da06, + 0x2dac0a, + 0x381346, + 0x38b205, + 0x31ffc5, + 0x31ffc7, + 0x39c90c, + 0x27384a, + 0x28ba86, + 0x2c4d85, + 0x28bc06, + 0x28bf47, + 0x28d986, + 0x28f90c, + 0x38d1c9, + 0x4de14187, + 0x291e45, + 0x291e46, + 0x2944c8, + 0x247e85, + 0x2a3505, + 0x2a3c88, + 0x2a3e8a, + 0x4e278142, + 0x4e607c82, + 0x2d7485, + 0x2a6b43, + 0x2461c8, + 0x211b83, + 0x2a4104, + 0x25ba8b, + 0x211b88, + 0x2ce288, + 0x4eb1cb49, + 0x2a8e09, + 0x2a9546, + 0x2aa788, + 0x2aa989, + 0x2ab386, + 0x2ab505, + 0x24e0c6, + 0x2abfc9, + 0x3ac147, + 0x381fc6, + 0x2d8787, + 0x21eb07, + 0x34e204, + 0x4ee3a9c9, + 0x270e08, + 0x3b80c8, + 0x31f887, + 0x2c2f46, + 0x20b0c9, + 0x2f2bc7, + 0x33194a, + 0x364a88, + 0x3be307, + 0x20ed86, + 0x39d28a, + 0x372cc8, + 0x382ac5, + 0x22b9c5, + 0x30b047, + 0x36ac49, + 0x30280b, + 0x314248, + 0x337009, + 0x255f87, + 0x2b868c, + 0x2b8c8c, + 0x2b8f8a, + 0x2b920c, + 0x2c10c8, + 0x2c12c8, + 0x2c14c4, + 0x2c1889, + 0x2c1ac9, + 0x2c1d0a, + 0x2c1f89, + 0x2c22c7, + 0x3b4c4c, + 0x23d386, + 0x3c0708, + 0x2faf06, + 0x37fc46, + 0x3953c7, + 0x3ab0c8, + 0x349c4b, + 0x370907, + 0x35b849, + 0x3782c9, + 0x254b87, + 0x2f2b44, + 0x282487, + 0x2eaf46, + 0x215946, + 0x2f36c5, + 0x3720c8, + 0x290244, + 0x290246, + 0x27370b, + 0x2b0889, + 0x39ddc6, + 0x204cc9, + 0x2ea806, + 0x22e688, + 0x20b4c3, + 0x2f9cc5, + 0x21d2c9, + 0x228b05, + 0x30ae84, + 0x274d06, + 0x3993c5, + 0x259b06, + 0x308747, + 0x331086, + 0x23078b, + 0x36da07, + 0x256e46, + 0x348606, + 0x232386, + 0x354949, + 0x2e474a, + 0x2ba345, + 0x3be8cd, + 0x2a3f86, + 0x2e9106, + 0x397cc6, + 0x285705, + 0x2db187, + 0x2f75c7, + 0x207cce, + 0x21f743, + 0x2c2f09, + 0x358489, + 0x2d9e47, + 0x26c287, + 0x2a1445, + 0x2ae085, + 0x4f386f0f, + 0x2c8f87, + 0x2c9148, + 0x2c9884, + 0x2c9e46, + 0x4f64cd02, + 0x2cdfc6, + 0x2d0e06, + 0x349f8e, + 0x2e2f4a, + 0x3b8946, + 0x2ca24a, + 0x2065c9, + 0x231e85, + 0x344788, + 0x39a146, + 0x29aac8, + 0x3c2dc8, + 0x2a57cb, + 0x306c85, + 0x301ac8, + 0x2069cc, + 0x383d47, + 0x255606, + 0x27c4c8, + 0x224a88, + 0x4fa53982, + 0x20e08b, + 0x3361c9, + 0x21cb09, + 0x39dc47, + 0x38a7c8, + 0x4fe3ca88, + 0x21318b, + 0x342009, + 0x28394d, + 0x24e488, + 0x3518c8, + 0x502056c2, + 0x331404, + 0x50623b82, + 0x2f7e06, + 0x50a0a542, + 0x24fc8a, + 0x204b86, + 0x22e0c8, + 0x2be048, + 0x326cc6, 
+ 0x398b46, + 0x2efd46, + 0x2ae305, + 0x240684, + 0x50e2e604, + 0x34c986, + 0x2a2247, + 0x5121c1c7, + 0x2e1bcb, + 0x348dc9, + 0x37dd8a, + 0x357ec4, + 0x320108, + 0x381d8d, + 0x2e6e49, + 0x2e7088, + 0x2e7709, + 0x2e9484, + 0x22c404, + 0x27d505, + 0x2ee68b, + 0x211b06, + 0x34c7c5, + 0x222449, + 0x306e88, + 0x29fb44, + 0x2d9bc9, + 0x326b05, + 0x2bea08, + 0x23b247, + 0x2b6848, + 0x27f046, + 0x207907, + 0x2d4109, + 0x374fc9, + 0x229b05, + 0x240305, + 0x51607482, + 0x2f4744, + 0x225dc5, + 0x292786, + 0x2f8305, + 0x297b87, + 0x34ca85, + 0x275844, + 0x335306, + 0x253b07, + 0x234fc6, + 0x384305, + 0x20e4c8, + 0x307405, + 0x20ef87, + 0x2154c9, + 0x2b09ca, + 0x34e587, + 0x34e58c, + 0x24d146, + 0x241b89, + 0x244885, + 0x247dc8, + 0x201283, + 0x210945, + 0x2eac05, + 0x257f47, + 0x51a12c02, + 0x398347, + 0x2f3c06, + 0x32fdc6, + 0x2f7f46, + 0x2249c6, + 0x2eb408, + 0x241045, + 0x2de707, + 0x2de70d, + 0x221b83, + 0x221b85, + 0x302387, + 0x398688, + 0x301f45, + 0x219788, + 0x23dc86, + 0x333947, + 0x3c0645, + 0x306d06, + 0x3a4c85, + 0x226bca, + 0x2fe986, + 0x22eec7, + 0x2f04c5, + 0x2ffc87, + 0x303684, + 0x30ae06, + 0x3446c5, + 0x357a0b, + 0x2eadc9, + 0x24bf4a, + 0x229b88, + 0x34c2c8, + 0x34cb8c, + 0x353847, + 0x36f808, + 0x387c08, + 0x394205, + 0x3a684a, + 0x3b00c9, + 0x51e01b42, + 0x3c6646, + 0x222e44, + 0x222e49, + 0x294f49, + 0x276587, + 0x2f9887, + 0x2bf1c9, + 0x285908, + 0x28590f, + 0x21dec6, + 0x2d2c8b, + 0x2f5645, + 0x2f5647, + 0x2f5c49, + 0x25bbc6, + 0x2d9b47, + 0x2d5ec5, + 0x234dc4, + 0x341006, + 0x226244, + 0x2df647, + 0x2ce808, + 0x522f9a48, + 0x2fa1c5, + 0x2fa307, + 0x24eb09, + 0x20de44, + 0x2473c8, + 0x52716788, + 0x201dc4, + 0x235fc8, + 0x3c2d04, + 0x3bebc9, + 0x21b2c5, + 0x52a0e982, + 0x21df05, + 0x2cb8c5, + 0x203008, + 0x238507, + 0x52e02a82, + 0x339e45, + 0x2cc586, + 0x249746, + 0x2f4708, + 0x2f4c88, + 0x2f82c6, + 0x30a146, + 0x385289, + 0x32fd06, + 0x29124b, + 0x3478c5, + 0x355c06, + 0x28e088, + 0x231bc6, + 0x20a386, + 0x219c4a, + 0x2a91ca, + 0x370c85, + 0x241107, + 0x30d0c6, + 0x53206d02, + 0x3024c7, + 0x260505, + 0x24b6c4, + 0x24b6c5, + 0x357dc6, + 0x271a47, + 0x220105, + 0x295004, + 0x2ad5c8, + 0x20a445, + 0x309fc7, + 0x3b1c45, + 0x226b05, + 0x268904, + 0x2abac9, + 0x2ff048, + 0x399286, + 0x2adc46, + 0x201ac6, + 0x536ff908, + 0x2ffb07, + 0x2ffe4d, + 0x30074c, + 0x300d49, + 0x300f89, + 0x53b65c82, + 0x3b7e83, + 0x2228c3, + 0x2eb005, + 0x39fd4a, + 0x32fbc6, + 0x3052c5, + 0x308904, + 0x30890b, + 0x323e4c, + 0x32488c, + 0x324b95, + 0x32754d, + 0x32a68f, + 0x32aa52, + 0x32aecf, + 0x32b292, + 0x32b713, + 0x32bbcd, + 0x32c18d, + 0x32c50e, + 0x32ca8e, + 0x32d2cc, + 0x32d68c, + 0x32dacb, + 0x32de4e, + 0x32e752, + 0x32f98c, + 0x32ff50, + 0x33ba12, + 0x33c68c, + 0x33cd4d, + 0x33d08c, + 0x33f651, + 0x34404d, + 0x34ac8d, + 0x34b28a, + 0x34b50c, + 0x34be0c, + 0x34c4cc, + 0x34ce8c, + 0x350493, + 0x350b10, + 0x350f10, + 0x351acd, + 0x3520cc, + 0x352dc9, + 0x35518d, + 0x3554d3, + 0x356491, + 0x3568d3, + 0x35888f, + 0x358c4c, + 0x358f4f, + 0x35930d, + 0x35990f, + 0x359cd0, + 0x35a74e, + 0x35df0e, + 0x35e490, + 0x35f08d, + 0x35fa0e, + 0x35fd8c, + 0x360d93, + 0x3628ce, + 0x362f50, + 0x363351, + 0x36378f, + 0x363b53, + 0x36580d, + 0x365b4f, + 0x365f0e, + 0x3665d0, + 0x3669c9, + 0x367d10, + 0x36830f, + 0x36898f, + 0x368d52, + 0x369e0e, + 0x36a80d, + 0x36ae8d, + 0x36b1cd, + 0x36c84d, + 0x36cb8d, + 0x36ced0, + 0x36d2cb, + 0x36e54c, + 0x36e8cc, + 0x36eecc, + 0x36f1ce, + 0x37bbd0, + 0x37e012, + 0x37e48b, + 0x37e98e, + 0x37ed0e, + 0x37f58e, + 0x37fa0b, + 0x53f80196, + 0x38104d, + 0x3814d4, + 0x38228d, + 0x386915, 
+ 0x388c4d, + 0x3895cf, + 0x389ccf, + 0x38ee0f, + 0x38f1ce, + 0x38f74d, + 0x391891, + 0x394b8c, + 0x394e8c, + 0x39518b, + 0x39560c, + 0x39654f, + 0x396912, + 0x39950d, + 0x39ae0c, + 0x39b94c, + 0x39bc4d, + 0x39bf8f, + 0x39c34e, + 0x39fa0c, + 0x39ffcd, + 0x3a030b, + 0x3a0bcc, + 0x3a14cd, + 0x3a180e, + 0x3a1b89, + 0x3a3553, + 0x3a3a8d, + 0x3a3dcd, + 0x3a43cc, + 0x3a484e, + 0x3a520f, + 0x3a55cc, + 0x3a58cd, + 0x3a5c0f, + 0x3a5fcc, + 0x3a778c, + 0x3a7b0c, + 0x3a7e0c, + 0x3a84cd, + 0x3a8812, + 0x3a8e8c, + 0x3a918c, + 0x3a9491, + 0x3a98cf, + 0x3a9c8f, + 0x3aa053, + 0x3ac70e, + 0x3aca8f, + 0x3ace4c, + 0x543ad18e, + 0x3ad50f, + 0x3ad8d6, + 0x3ae0d2, + 0x3af8cc, + 0x3b030f, + 0x3b098d, + 0x3b0ccf, + 0x3b108c, + 0x3b138d, + 0x3b16cd, + 0x3b2c8e, + 0x3b3bcc, + 0x3b3ecc, + 0x3b41d0, + 0x3b7211, + 0x3b764b, + 0x3b7a8c, + 0x3b7d8e, + 0x3baa91, + 0x3baece, + 0x3bb24d, + 0x3c338b, + 0x3c3c8f, + 0x3c4794, + 0x25cd82, + 0x25cd82, + 0x2032c3, + 0x25cd82, + 0x2032c3, + 0x25cd82, + 0x2009c2, + 0x24e105, + 0x3ba78c, + 0x25cd82, + 0x25cd82, + 0x2009c2, + 0x25cd82, + 0x294b45, + 0x2b09c5, + 0x25cd82, + 0x25cd82, + 0x200302, + 0x294b45, + 0x328909, + 0x35618c, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x24e105, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x200302, + 0x328909, + 0x25cd82, + 0x25cd82, + 0x25cd82, + 0x2b09c5, + 0x25cd82, + 0x2b09c5, + 0x35618c, + 0x3ba78c, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x20ec83, + 0x241d03, + 0x1233c8, + 0x6fac4, + 0xae43, + 0x193708, + 0x200742, + 0x55202c42, + 0x246d03, + 0x252d84, + 0x206c03, + 0x3c24c4, + 0x234106, + 0x2137c3, + 0x2f9084, + 0x2f0b05, + 0x21f743, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x22d50a, + 0x253786, + 0x37f08c, + 0xcd588, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x20be83, + 0x2d0e06, + 0x20ec83, + 0x241d03, + 0x219543, + 0xa4d48, + 0x117485, + 0x187689, + 0x15842, + 0x56793ec5, + 0x2aa47, + 0xaf148, + 0xd9ce, + 0x87e92, + 0x11b5cb, + 0x102d06, + 0x56ad2fc5, + 0x56ed2fcc, + 0x25147, + 0x15da87, + 0x1208ca, + 0x42ed0, + 0x149445, + 0x10bfcb, + 0x7e948, + 0x3c2c7, + 0xf400b, + 0x78449, + 0x127b07, + 0x3a347, + 0x760c7, + 0x3c206, + 0x67008, + 0x57429546, + 0xa964d, + 0x120290, + 0x5780a6c2, + 0x1f6c8, + 0x82dd0, + 0x15b5cc, + 0x57f61e0d, + 0x5fbc8, + 0x6b8c7, + 0x164f49, + 0x5b646, + 0x946c8, + 0xebfc2, + 0x71e8a, + 0x31047, + 0x11d107, + 0xa5a89, + 0xa7708, + 0xef4c5, + 0xe85ce, + 0x1260e, + 0x1b98f, + 0x25589, + 0x52c49, + 0x7e10b, + 0x8ef4f, + 0xabccc, + 0x1125cb, + 0x105848, + 0xe1ac7, + 0xf87c8, + 0x132c8b, + 0x15274c, + 0x15af0c, + 0x1625cc, + 0x16784d, + 0x142188, + 0xfcdc2, + 0x1970c9, + 0x133acb, + 0xc3146, + 0x12e28b, + 0xd560a, + 0xd61c5, + 0xdae90, + 0xdd606, + 0x140806, + 0x14a345, + 0x18a988, + 0xe3d47, + 0xe4007, + 0x1fcc7, + 0xf7c4a, + 0xaefca, + 0x7dac6, + 0x9088d, + 0x6e308, + 0x1be608, + 0x68849, + 0xb7cc5, + 0xf778c, + 0x167a4b, + 0x16ab84, + 0xfec89, + 0xfeec6, + 0x4cb06, + 0x4182, + 0x157206, + 0x14634b, + 0x10ac87, + 0xdc2, + 0xc5105, + 0x16704, + 0x781, + 0x7bc3, + 0x572a6d06, + 0x94a43, + 0x2542, + 0x31044, + 0x602, + 0x8b304, + 0xcc2, + 0x7ec2, + 0x3102, + 0x114902, + 0x3cc2, + 0xd2fc2, + 0x3c02, + 0xefa02, + 0x3e242, + 0x4ac2, + 0x2e02, + 0x4c3c2, + 0x37583, + 0x1f02, + 0x1b02, + 0x8882, + 0x12c42, + 0x1402, + 0x35c02, + 0x552c2, + 0x6202, + 0x3882, + 0x1342, + 0x14bc3, + 0x5842, + 0x1102, + 0xb0102, + 0x9602, + 0x1542, + 0x4482, + 0xe302, + 0x6f582, + 0x1e42, + 0x17c0c2, + 0x6cd82, + 0x3a3c2, + 0xec83, + 0x2002, + 0x53982, + 0x1382, + 0x6e02, + 0x29a85, + 0x8302, + 0x48602, + 
0x44303, + 0x982, + 0x16f02, + 0x1282, + 0x4042, + 0xed42, + 0x2a82, + 0xc842, + 0x4182, + 0x1f8c5, + 0x582009c2, + 0x587696c3, + 0x1fbc3, + 0x58a009c2, + 0x1fbc3, + 0x179487, + 0x215383, + 0x200742, + 0x20be03, + 0x237583, + 0x203d43, + 0x201fc3, + 0x20be83, + 0x20ec83, + 0x20ae43, + 0x241d03, + 0x294a83, + 0x10ec3, + 0xcd588, + 0x20be03, + 0x237583, + 0x203d43, + 0x21f743, + 0x20ec83, + 0x20ae43, + 0xaff03, + 0x241d03, + 0x20be03, + 0x237583, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x200541, + 0x21f743, + 0x20ec83, + 0x228803, + 0x241d03, + 0x3744, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x21f6c3, + 0x203d43, + 0x257e43, + 0x26b143, + 0x2a2c83, + 0x280e83, + 0x30e843, + 0x26ff84, + 0x20ec83, + 0x241d03, + 0x203f83, + 0x31e084, + 0x250b03, + 0x5583, + 0x22d443, + 0x332388, + 0x325384, + 0x2023ca, + 0x238f06, + 0x10a904, + 0x37a147, + 0x22174a, + 0x21dd89, + 0x3ade07, + 0x3b628a, + 0x2b6c03, + 0x2d750b, + 0x293609, + 0x201bc5, + 0x34e347, + 0x2c42, + 0x20be03, + 0x214447, + 0x2fb145, + 0x2f2a09, + 0x237583, + 0x306a86, + 0x2c0c03, + 0xeae83, + 0x107f06, + 0x122746, + 0x13747, + 0x2176c6, + 0x227045, + 0x39e607, + 0x2d2107, + 0x5b30e843, + 0x33c8c7, + 0x371fc3, + 0x20fb85, + 0x26ff84, + 0x26ed48, + 0x36a50c, + 0x2ad885, + 0x2a2886, + 0x214307, + 0x345847, + 0x252e47, + 0x254d08, + 0x30600f, + 0x3371c5, + 0x246e07, + 0x37c407, + 0x2a424a, + 0x2cebc9, + 0x308045, + 0x30b7ca, + 0x136686, + 0x2c0c85, + 0x36c104, + 0x2bdf86, + 0x2f1a47, + 0x382947, + 0x348748, + 0x21b545, + 0x2fb046, + 0x21d105, + 0x36dd45, + 0x289684, + 0x326bc7, + 0x2eb24a, + 0x23fc48, + 0x366446, + 0xbe83, + 0x2d8345, + 0x318a86, + 0x3b4e86, + 0x34a246, + 0x21f743, + 0x399787, + 0x37c385, + 0x20ec83, + 0x2d58cd, + 0x20ae43, + 0x348848, + 0x38c844, + 0x275b05, + 0x2a4146, + 0x23d186, + 0x355b07, + 0x204a07, + 0x289085, + 0x241d03, + 0x3268c7, + 0x3650c9, + 0x340a49, + 0x30dc0a, + 0x249a82, + 0x20fb44, + 0x2de284, + 0x349b07, + 0x398208, + 0x2e4f89, + 0x221a49, + 0x2e5dc7, + 0x35c346, + 0xe8346, + 0x2e9484, + 0x2e9a8a, + 0x2edc08, + 0x2efc09, + 0x2de106, + 0x2b1985, + 0x23fb08, + 0x2c358a, + 0x2b5d83, + 0x31e206, + 0x2e5ec7, + 0x2311c5, + 0x38c705, + 0x3a26c3, + 0x2702c4, + 0x22b985, + 0x282ac7, + 0x2ff185, + 0x337c86, + 0x14aa05, + 0x2a3203, + 0x3b8a09, + 0x2758cc, + 0x2ca74c, + 0x2cbb08, + 0x2baec7, + 0x2fbc08, + 0x2fc24a, + 0x2fcc4b, + 0x293748, + 0x23c908, + 0x23d286, + 0x201985, + 0x320fca, + 0x369705, + 0x20e982, + 0x3c0507, + 0x261146, + 0x367405, + 0x36b749, + 0x277385, + 0x36d785, + 0x35c909, + 0x3189c6, + 0x3b6988, + 0x20fc43, + 0x217806, + 0x274c46, + 0x30a485, + 0x30a489, + 0x2e56c9, + 0x251b47, + 0x10c984, + 0x30c987, + 0x221949, + 0x23d605, + 0x40788, + 0x346245, + 0x332285, + 0x3c1309, + 0x203402, + 0x250904, + 0x203c82, + 0x205842, + 0x3c0d85, + 0x30a688, + 0x2b7c05, + 0x2c2483, + 0x2c2485, + 0x2ce1c3, + 0x20ff02, + 0x208f84, + 0x2c7d03, + 0x206f02, + 0x340484, + 0x2def43, + 0x204882, + 0x21fc43, + 0x28cb84, + 0x2f01c3, + 0x256784, + 0x200a02, + 0x219443, + 0x21d403, + 0x206a42, + 0x2f4682, + 0x2e5509, + 0x20ad42, + 0x2889c4, + 0x200442, + 0x23f984, + 0x35c304, + 0x287304, + 0x204182, + 0x23c542, + 0x3295c3, + 0x23b943, + 0x24d644, + 0x2b5c04, + 0x2eddc4, + 0x30b6c4, + 0x309043, + 0x335a83, + 0x336604, + 0x30eac4, + 0x30f306, + 0x2145c2, + 0x202c42, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x200742, + 0x2b6c03, + 0x20be03, + 0x237583, + 0x201b03, + 0x30e843, + 0x26ff84, + 0x2e57c4, + 0x226444, + 0x20ec83, + 0x241d03, + 0x219543, + 0x2ea0c4, + 0x31a803, + 0x2a6503, + 0x36aac4, + 0x346046, + 
0x20b583, + 0x15da87, + 0x22fac3, + 0x21e903, + 0x2b0c43, + 0x20fbc3, + 0x20be83, + 0x339d45, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x210e03, + 0x2333c3, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x214bc3, + 0x20ec83, + 0x238184, + 0xaff03, + 0x241d03, + 0x209944, + 0x2bdd85, + 0x15da87, + 0x202c42, + 0x209d42, + 0x202542, + 0x2032c2, + 0xae43, + 0x200342, + 0x20be03, + 0x23d744, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x226444, + 0x20ec83, + 0xae43, + 0x241d03, + 0x207c03, + 0x28b304, + 0xcd588, + 0x20be03, + 0x20ae43, + 0x10ec3, + 0x13b5c4, + 0x253404, + 0xcd588, + 0x20be03, + 0x254a04, + 0x26ff84, + 0x20ae43, + 0x2056c2, + 0x241d03, + 0x24f3c3, + 0x702c4, + 0x2f5805, + 0x20e982, + 0x30ec03, + 0xf89, + 0xd3686, + 0xfcc8, + 0x200742, + 0xcd588, + 0x202c42, + 0x237583, + 0x30e843, + 0x201342, + 0xae43, + 0x241d03, + 0x200742, + 0x1b6447, + 0x11c889, + 0x5483, + 0xcd588, + 0x1226c3, + 0x5f33d587, + 0xbe03, + 0x1c6548, + 0x237583, + 0x30e843, + 0x178d46, + 0x214bc3, + 0x5b388, + 0xc0248, + 0x40e06, + 0x21f743, + 0xc6788, + 0x97c03, + 0xdbd45, + 0x37787, + 0xec83, + 0x6c83, + 0x41d03, + 0x4bc2, + 0x16c18a, + 0x1c0e43, + 0x30c5c4, + 0x105e0b, + 0x1063c8, + 0x8d302, + 0x200742, + 0x202c42, + 0x20be03, + 0x237583, + 0x2d2484, + 0x30e843, + 0x214bc3, + 0x21f743, + 0x20ec83, + 0x20be03, + 0x237583, + 0x30e843, + 0x20be83, + 0x20ec83, + 0x241d03, + 0x209943, + 0x207c03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x10ec3, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x20be83, + 0x20ec83, + 0x241d03, + 0x230882, + 0x200101, + 0x200742, + 0x200301, + 0x32a782, + 0xcd588, + 0x21fe85, + 0x200781, + 0xbe03, + 0x2014c1, + 0x200041, + 0x200141, + 0x24e082, + 0x378184, + 0x24e083, + 0x201401, + 0x200901, + 0x200541, + 0x200a81, + 0x316307, + 0x337dcf, + 0x2fa486, + 0x200641, + 0x33b606, + 0x200081, + 0x2001c1, + 0x3c35ce, + 0x200341, + 0x241d03, + 0x201681, + 0x254285, + 0x204bc2, + 0x3a25c5, + 0x2002c1, + 0x200a01, + 0x200401, + 0x20e982, + 0x200441, + 0x203f81, + 0x20d601, + 0x201181, + 0x200dc1, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x21a003, + 0x20be03, + 0x30e843, + 0x8d248, + 0x21f743, + 0x20ec83, + 0x5e8c3, + 0x241d03, + 0x14e0f48, + 0x10d08, + 0xcd588, + 0xae43, + 0x24704, + 0x4cec4, + 0x14e0f4a, + 0xcd588, + 0xaff03, + 0x20be03, + 0x237583, + 0x30e843, + 0x20ec83, + 0x241d03, + 0x205583, + 0xcd588, + 0x20be03, + 0x237583, + 0x2d2484, + 0x241d03, + 0x252385, + 0x317c44, + 0x20be03, + 0x20ec83, + 0x241d03, + 0x2fc8a, + 0xfd504, + 0x112a46, + 0x202c42, + 0x20be03, + 0x234d09, + 0x237583, + 0x2a82c9, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x2e9288, + 0x397b87, + 0x2f5805, + 0x1b7888, + 0x1b6447, + 0x19848a, + 0x101d0b, + 0x13b847, + 0x45388, + 0x3a80a, + 0x13dc8, + 0x11c889, + 0x2b847, + 0x67fc7, + 0x1c28c8, + 0x1c6548, + 0x470cf, + 0x26505, + 0x1c6847, + 0x178d46, + 0x4c207, + 0x108186, + 0x5b388, + 0x9b986, + 0x1187c7, + 0x142349, + 0x1b5207, + 0xe68c9, + 0xb8209, + 0xbdb06, + 0xc0248, + 0xbeb45, + 0x77fca, + 0xc6788, + 0x97c03, + 0xcea08, + 0x37787, + 0x1ac045, + 0x4dc90, + 0x6c83, + 0xaff03, + 0x1c3147, + 0x1d5c5, + 0xe4308, + 0x605c5, + 0x1c0e43, + 0x142748, + 0x132146, + 0x199bc9, + 0xaab87, + 0x124b, + 0x137a84, + 0xfe3c4, + 0x105e0b, + 0x1063c8, + 0x107e07, + 0x117485, + 0x20be03, + 0x237583, + 0x203d43, + 0x241d03, + 0x244a03, + 0x30e843, + 0xaff03, + 
0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x7e24b, + 0x200742, + 0x202c42, + 0x241d03, + 0xcd588, + 0x200742, + 0x202c42, + 0x202542, + 0x201342, + 0x200b82, + 0x20ec83, + 0x200342, + 0x200742, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x237583, + 0x202542, + 0x30e843, + 0x214bc3, + 0x21f743, + 0x226444, + 0x20ec83, + 0x207783, + 0x241d03, + 0x30c5c4, + 0x203f83, + 0x30e843, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x20ae43, + 0x241d03, + 0x3afd87, + 0x20be03, + 0x279947, + 0x2e6686, + 0x216543, + 0x208883, + 0x30e843, + 0x204d03, + 0x26ff84, + 0x38b344, + 0x2b9906, + 0x20c743, + 0x20ec83, + 0x241d03, + 0x252385, + 0x309e84, + 0x320dc3, + 0x20d203, + 0x3c0507, + 0x23b1c5, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x98747, + 0x2149c2, + 0x26e443, + 0x20df43, + 0x2b6c03, + 0x6760be03, + 0x20b2c2, + 0x237583, + 0x206c03, + 0x30e843, + 0x26ff84, + 0x3c32c3, + 0x3371c3, + 0x21f743, + 0x226444, + 0x67a02f02, + 0x20ec83, + 0x241d03, + 0x235cc3, + 0x214c43, + 0x230882, + 0x203f83, + 0xcd588, + 0x30e843, + 0x10ec3, + 0x31b0c4, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x23d744, + 0x237583, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x39e304, + 0x21a484, + 0x2d0e06, + 0x226444, + 0x20ec83, + 0x241d03, + 0x219543, + 0x261146, + 0x4170b, + 0x29546, + 0xeb94a, + 0x10b34a, + 0xcd588, + 0x21d0c4, + 0x68e0be03, + 0x2b6bc4, + 0x237583, + 0x268984, + 0x30e843, + 0x357d43, + 0x21f743, + 0x20ec83, + 0xaff03, + 0x241d03, + 0x55a43, + 0x33840b, + 0x3b1a0a, + 0x3c580c, + 0xd80c8, + 0x200742, + 0x202c42, + 0x202542, + 0x232585, + 0x26ff84, + 0x201e42, + 0x21f743, + 0x21a484, + 0x2032c2, + 0x200342, + 0x207c02, + 0x230882, + 0xb6c03, + 0x4d5c2, + 0x386389, + 0x3a3088, + 0x310449, + 0x34e049, + 0x23e48a, + 0x24e90a, + 0x219382, + 0x2efa02, + 0x2c42, + 0x20be03, + 0x230a42, + 0x246fc6, + 0x368802, + 0x207582, + 0x30208e, + 0x21948e, + 0x27bf47, + 0x20ec07, + 0x2e89c2, + 0x237583, + 0x30e843, + 0x209182, + 0x201342, + 0x6ff83, + 0x23d94f, + 0x247302, + 0x2f9507, + 0x2ad447, + 0x314407, + 0x2b0fcc, + 0x2b9b8c, + 0x207144, + 0x27d34a, + 0x2193c2, + 0x209602, + 0x2b9844, + 0x2028c2, + 0x2c10c2, + 0x2b9dc4, + 0x217f42, + 0x201542, + 0xf003, + 0x29ba07, + 0x233805, + 0x20e302, + 0x24c184, + 0x37c0c2, + 0x2d7c88, + 0x20ec83, + 0x39f108, + 0x206a82, + 0x207305, + 0x388206, + 0x241d03, + 0x208302, + 0x2e51c7, + 0x4bc2, + 0x272585, + 0x204905, + 0x212182, + 0x2030c2, + 0x293c0a, + 0x288f0a, + 0x23a382, + 0x29a184, + 0x2040c2, + 0x20fa08, + 0x200d82, + 0x39d588, + 0x302ac7, + 0x3038c9, + 0x204982, + 0x3086c5, + 0x36ba05, + 0x21b60b, + 0x2c418c, + 0x230548, + 0x31c188, + 0x2145c2, + 0x355bc2, + 0x200742, + 0xcd588, + 0x202c42, + 0x20be03, + 0x202542, + 0x2032c2, + 0xae43, + 0x200342, + 0x241d03, + 0x207c02, + 0x200742, + 0x6a202c42, + 0x6a70e843, + 0x20f003, + 0x201e42, + 0x20ec83, + 0x338c03, + 0x241d03, + 0x2e2d83, + 0x379586, + 0x1607c03, + 0xcd588, + 0x6e247, + 0x14a345, + 0xa7e0d, + 0xa5f4a, + 0x85047, + 0x6ae00a42, + 0x6b200602, + 0x6b600282, + 0x6ba02b82, + 0x6be12442, + 0x6c203cc2, + 0x15da87, + 0x6c602c42, + 0x6ca1b282, + 0x6ce1f9c2, + 0x6d202e02, + 0x219483, + 0x22644, + 0x282dc3, + 0x6d615902, + 0x6da039c2, + 0x55087, + 0x6de02202, + 0x6e200902, + 0x6e600542, + 0x6ea07d02, + 0x6ee03882, + 0x6f201342, + 0xc0f85, + 0x24c4c3, + 0x23a2c4, + 0x6f6028c2, + 0x6fa0dd82, + 0x6fe00682, + 0xb714b, + 0x702000c2, + 0x70a54ac2, + 0x70e01e42, + 0x71200b82, + 0x71603282, + 0x71a05a02, + 0x71e0d682, + 0x7226cd82, + 0x72602f02, + 0x72a04d42, + 
0x72e032c2, + 0x7323e0c2, + 0x7362a402, + 0x73a11e82, + 0xafd44, + 0x339b43, + 0x73e0e882, + 0x742190c2, + 0x74606482, + 0x74a02882, + 0x74e00342, + 0x75206f02, + 0x7e3c7, + 0x756057c2, + 0x75a00502, + 0x75e07c02, + 0x76209f82, + 0xf778c, + 0x76627882, + 0x76a2c0c2, + 0x76e0a902, + 0x77206d02, + 0x77611d82, + 0x77a3e602, + 0x77e0fc42, + 0x78213802, + 0x78674fc2, + 0x78a4f1c2, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x11343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x707c32c3, + 0x211343, + 0x339dc4, + 0x3a2f86, + 0x2f0ec3, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x200189, + 0x24d5c2, + 0x391283, + 0x2b8503, + 0x202f85, + 0x206c03, + 0x3c32c3, + 0x211343, + 0x29ea43, + 0x233d43, + 0x3bd849, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x24d5c2, + 0x24d5c2, + 0x3c32c3, + 0x211343, + 0x7920be03, + 0x237583, + 0x332683, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0xcd588, + 0x202c42, + 0x20be03, + 0x20ec83, + 0x241d03, + 0x20be03, + 0x237583, + 0x30e843, + 0x21f743, + 0x20ec83, + 0xae43, + 0x241d03, + 0x253404, + 0x202c42, + 0x20be03, + 0x322183, + 0x237583, + 0x254a04, + 0x203d43, + 0x30e843, + 0x26ff84, + 0x214bc3, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x24f3c3, + 0x2f5805, + 0x233d43, + 0x203f83, + 0xae43, + 0x202c42, + 0x20be03, + 0x3c32c3, + 0x20ec83, + 0x241d03, + 0x200742, + 0x2b6c03, + 0xcd588, + 0x20be03, + 0x237583, + 0x30e843, + 0x234106, + 0x26ff84, + 0x214bc3, + 0x226444, + 0x20ec83, + 0x241d03, + 0x219543, + 0x20be03, + 0x237583, + 0x20ec83, + 0x241d03, + 0x144be47, + 0x20be03, + 0x29546, + 0x237583, + 0x30e843, + 0xd91c6, + 0x20ec83, + 0x241d03, + 0x318fc8, + 0x31bfc9, + 0x330349, + 0x33af08, + 0x38a348, + 0x38a349, + 0x22c10d, + 0x24b00f, + 0x2ec510, + 0x354d0d, + 0x36ebcc, + 0x38bc4b, + 0xaf148, + 0xc7bc5, + 0x200742, + 0x23b005, + 0x20bfc3, + 0x7c602c42, + 0x237583, + 0x30e843, + 0x2d7ac7, + 0x20fbc3, + 0x21f743, + 0x20ec83, + 0x228803, + 0x20c0c3, + 0x20ae43, + 
0x241d03, + 0x253786, + 0x20e982, + 0x203f83, + 0xcd588, + 0x200742, + 0x2b6c03, + 0x202c42, + 0x20be03, + 0x237583, + 0x30e843, + 0x26ff84, + 0x21f743, + 0x20ec83, + 0x241d03, + 0x207c03, + 0xf84, + 0x154ab06, + 0x200742, + 0x202c42, + 0x30e843, + 0x21f743, + 0x241d03, +} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [ 1 bits] unused +// [ 1 bits] wildcard bit +// [ 2 bits] node type +// [14 bits] high nodes index (exclusive) of children +// [14 bits] low nodes index (inclusive) of children +var children = [...]uint32{ + 0x0, + 0x10000000, + 0x20000000, + 0x40000000, + 0x50000000, + 0x60000000, + 0x185460f, + 0x1858615, + 0x187c616, + 0x19d861f, + 0x19ec676, + 0x1a0067b, + 0x1a14680, + 0x1a34685, + 0x1a3868d, + 0x1a5068e, + 0x1a78694, + 0x1a7c69e, + 0x1a9469f, + 0x1a986a5, + 0x1a9c6a6, + 0x1ad86a7, + 0x1adc6b6, + 0x21ae46b7, + 0x1b2c6b9, + 0x1b306cb, + 0x1b506cc, + 0x1b646d4, + 0x1b686d9, + 0x1b986da, + 0x1bb46e6, + 0x1bdc6ed, + 0x1bec6f7, + 0x1bf06fb, + 0x1c886fc, + 0x1c9c722, + 0x1cb0727, + 0x1ce072c, + 0x1cf0738, + 0x1d0473c, + 0x1da8741, + 0x1fa076a, + 0x1fa47e8, + 0x20107e9, + 0x207c804, + 0x209481f, + 0x20a8825, + 0x20b082a, + 0x20c482c, + 0x20c8831, + 0x20e4832, + 0x2134839, + 0x215084d, + 0x2154854, + 0x2158855, + 0x2174856, + 0x21b085d, + 0x621b486c, + 0x21cc86d, + 0x21e0873, + 0x21e4878, + 0x21f4879, + 0x22a487d, + 0x22a88a9, + 0x222b88aa, + 0x222bc8ae, + 0x222c08af, + 0x22f88b0, + 0x22fc8be, + 0x278c8bf, + 0x228349e3, + 0x22838a0d, + 0x2283ca0e, + 0x22848a0f, + 0x2284ca12, + 0x22858a13, + 0x2285ca16, + 0x22860a17, + 0x22864a18, + 0x22868a19, + 0x2286ca1a, + 0x22878a1b, + 0x2287ca1e, + 0x22888a1f, + 0x2288ca22, + 0x22890a23, + 0x22894a24, + 0x228a0a25, + 0x228a4a28, + 0x228b0a29, + 0x228b4a2c, + 0x228b8a2d, + 0x228bca2e, + 0x28c0a2f, + 0x228c4a30, + 0x228d0a31, + 0x228d4a34, + 0x28dca35, + 0x291ca37, + 0x2293ca47, + 0x22940a4f, + 0x22944a50, + 0x2948a51, + 0x2294ca52, + 0x2950a53, + 0x296ca54, + 0x2984a5b, + 0x2988a61, + 0x2998a62, + 0x29a4a66, + 0x29d8a69, + 0x29dca76, + 0x29f0a77, + 0x229f8a7c, + 0x2ab8a7e, + 0x22abcaae, + 0x2ac4aaf, + 0x2ac8ab1, + 0x2ae0ab2, + 0x2af4ab8, + 0x2b1cabd, + 0x2b3cac7, + 0x2b6cacf, + 0x2b94adb, + 0x2b98ae5, + 0x2bbcae6, + 0x2bc0aef, + 0x2bd4af0, + 0x2bd8af5, + 0x2bdcaf6, + 0x2bfcaf7, + 0x2c1caff, + 0x2c20b07, + 0x22c24b08, + 0x2c28b09, + 0x2c2cb0a, + 0x2c3cb0b, + 0x2c40b0f, + 0x2cb8b10, + 0x2cbcb2e, + 0x2cd8b2f, + 0x2ce8b36, + 0x2cfcb3a, + 0x2d14b3f, + 0x2d2cb45, + 0x2d44b4b, + 0x2d48b51, + 0x2d60b52, + 0x2d7cb58, + 0x2d9cb5f, + 0x2db4b67, + 0x2e14b6d, + 0x2e30b85, + 0x2e38b8c, + 0x2e3cb8e, + 0x2e50b8f, + 0x2e94b94, + 0x2f14ba5, + 0x2f40bc5, + 0x2f44bd0, + 0x2f4cbd1, + 0x2f6cbd3, + 0x2f70bdb, + 0x2f94bdc, + 0x2f9cbe5, + 0x2fd8be7, + 0x301cbf6, + 0x3020c07, + 0x3094c08, + 0x3098c25, + 0x2309cc26, + 0x230a0c27, + 0x230a4c28, + 0x230b4c29, + 0x230b8c2d, + 0x230bcc2e, + 0x230c0c2f, + 0x230c4c30, + 0x30dcc31, + 0x3100c37, + 0x3120c40, + 0x36e4c48, + 0x36f0db9, + 0x3710dbc, + 0x38ccdc4, + 0x399ce33, + 0x3a0ce67, + 0x3a64e83, + 0x3b4ce99, + 0x3ba4ed3, + 0x3be0ee9, + 0x3cdcef8, + 0x3da8f37, + 0x3e40f6a, + 0x3ed0f90, + 0x3f34fb4, + 0x416cfcd, + 0x422505b, + 0x42f1089, + 0x433d0bc, + 0x43c50cf, + 0x44010f1, + 0x4451100, + 0x44c9114, + 0x644cd132, + 0x644d1133, + 0x644d5134, + 0x4551135, + 0x45ad154, + 
0x462916b, + 0x46a118a, + 0x47211a8, + 0x478d1c8, + 0x48b91e3, + 0x491122e, + 0x64915244, + 0x49ad245, + 0x4a3526b, + 0x4a8128d, + 0x4ae92a0, + 0x4b912ba, + 0x4c592e4, + 0x4cc1316, + 0x4dd5330, + 0x64dd9375, + 0x64ddd376, + 0x4e39377, + 0x4e9538e, + 0x4f253a5, + 0x4fa13c9, + 0x4fe53e8, + 0x50c93f9, + 0x50fd432, + 0x515d43f, + 0x51d1457, + 0x5259474, + 0x5299496, + 0x53094a6, + 0x6530d4c2, + 0x53314c3, + 0x53354cc, + 0x534d4cd, + 0x53694d3, + 0x53ad4da, + 0x53bd4eb, + 0x53d54ef, + 0x544d4f5, + 0x5455513, + 0x5469515, + 0x548551a, + 0x54b1521, + 0x54b552c, + 0x54bd52d, + 0x54d152f, + 0x54ed534, + 0x54f953b, + 0x550153e, + 0x553d540, + 0x555154f, + 0x5559554, + 0x5565556, + 0x556d559, + 0x559155b, + 0x55b5564, + 0x55cd56d, + 0x55d1573, + 0x55d9574, + 0x55dd576, + 0x5645577, + 0x5649591, + 0x566d592, + 0x569159b, + 0x56ad5a4, + 0x56bd5ab, + 0x56d15af, + 0x56d55b4, + 0x56dd5b5, + 0x56f15b7, + 0x57015bc, + 0x57055c0, + 0x57215c1, + 0x5fb15c8, + 0x5fe97ec, + 0x60157fa, + 0x6031805, + 0x605180c, + 0x6071814, + 0x60b581c, + 0x60bd82d, + 0x260c182f, + 0x260c5830, + 0x60cd831, + 0x6245833, + 0x26249891, + 0x26259892, + 0x26261896, + 0x2626d898, + 0x627189b, + 0x627589c, + 0x629d89d, + 0x62c58a7, + 0x62c98b1, + 0x63018b2, + 0x63218c0, + 0x6e798c8, + 0x6e7db9e, + 0x6e81b9f, + 0x26e85ba0, + 0x6e89ba1, + 0x26e8dba2, + 0x6e91ba3, + 0x26e9dba4, + 0x6ea1ba7, + 0x6ea5ba8, + 0x26ea9ba9, + 0x6eadbaa, + 0x26eb5bab, + 0x6eb9bad, + 0x6ebdbae, + 0x26ecdbaf, + 0x6ed1bb3, + 0x6ed5bb4, + 0x6ed9bb5, + 0x6eddbb6, + 0x26ee1bb7, + 0x6ee5bb8, + 0x6ee9bb9, + 0x6eedbba, + 0x6ef1bbb, + 0x26ef9bbc, + 0x6efdbbe, + 0x6f01bbf, + 0x6f05bc0, + 0x26f09bc1, + 0x6f0dbc2, + 0x26f15bc3, + 0x26f19bc5, + 0x6f35bc6, + 0x6f45bcd, + 0x6f89bd1, + 0x6f8dbe2, + 0x6fb1be3, + 0x6fb5bec, + 0x6fb9bed, + 0x7145bee, + 0x27149c51, + 0x27151c52, + 0x27155c54, + 0x27159c55, + 0x7161c56, + 0x723dc58, + 0x27249c8f, + 0x2724dc92, + 0x27251c93, + 0x27255c94, + 0x7259c95, + 0x7285c96, + 0x7289ca1, + 0x72adca2, + 0x72b9cab, + 0x72d9cae, + 0x72ddcb6, + 0x7315cb7, + 0x75adcc5, + 0x7669d6b, + 0x767dd9a, + 0x76b1d9f, + 0x76e1dac, + 0x76fddb8, + 0x7725dbf, + 0x7745dc9, + 0x7761dd1, + 0x7789dd8, + 0x7799de2, + 0x779dde6, + 0x77a1de7, + 0x77d5de8, + 0x77e1df5, + 0x7801df8, + 0x7879e00, + 0x2787de1e, + 0x78a1e1f, + 0x78c1e28, + 0x78d5e30, + 0x78e9e35, + 0x78ede3a, + 0x790de3b, + 0x79b1e43, + 0x79cde6c, + 0x79f1e73, + 0x79f9e7c, + 0x7a05e7e, + 0x7a0de81, + 0x7a21e83, + 0x7a41e88, + 0x7a4de90, + 0x7a59e93, + 0x7a89e96, + 0x7b5dea2, + 0x7b61ed7, + 0x7b75ed8, + 0x7b7dedd, + 0x7b95edf, + 0x7b99ee5, + 0x7ba5ee6, + 0x7ba9ee9, + 0x7bc5eea, + 0x7c01ef1, + 0x7c05f00, + 0x7c25f01, + 0x7c75f09, + 0x7c91f1d, + 0x7ce5f24, + 0x7ce9f39, + 0x7cedf3a, + 0x7cf1f3b, + 0x7d35f3c, + 0x7d45f4d, + 0x7d85f51, + 0x7d89f61, + 0x7db9f62, + 0x7f01f6e, + 0x7f29fc0, + 0x7f55fca, + 0x7f65fd5, + 0x7f6dfd9, + 0x807dfdb, + 0x808a01f, + 0x8096022, + 0x80a2025, + 0x80ae028, + 0x80ba02b, + 0x80c602e, + 0x80d2031, + 0x80de034, + 0x80ea037, + 0x80f603a, + 0x810203d, + 0x810e040, + 0x811a043, + 0x8122046, + 0x812e048, + 0x813a04b, + 0x814604e, + 0x8152051, + 0x815e054, + 0x816a057, + 0x817605a, + 0x818205d, + 0x818e060, + 0x819a063, + 0x81a6066, + 0x81d2069, + 0x81de074, + 0x81ea077, + 0x81f607a, + 0x820207d, + 0x820e080, + 0x8216083, + 0x8222085, + 0x822e088, + 0x823a08b, + 0x824608e, + 0x8252091, + 0x825e094, + 0x826a097, + 0x827609a, + 0x828209d, + 0x828e0a0, + 0x829a0a3, + 0x82a60a6, + 0x82b20a9, + 0x82ba0ac, + 0x82c60ae, + 0x82d20b1, + 0x82de0b4, + 0x82ea0b7, + 0x82f60ba, + 0x83020bd, + 0x830e0c0, + 
0x831a0c3, + 0x831e0c6, + 0x832a0c7, + 0x83460ca, + 0x834a0d1, + 0x835a0d2, + 0x83760d6, + 0x83ba0dd, + 0x83be0ee, + 0x83d20ef, + 0x84060f4, + 0x8416101, + 0x8436105, + 0x844e10d, + 0x8466113, + 0x846e119, + 0x284b211b, + 0x84b612c, + 0x84e212d, + 0x84ea138, + 0x84fe13a, +} + +// max children 500 (capacity 1023) +// max text offset 29102 (capacity 32767) +// max text length 36 (capacity 63) +// max hi 8511 (capacity 16383) +// max lo 8506 (capacity 16383) diff --git a/vendor/golang.org/x/net/publicsuffix/table_test.go b/vendor/golang.org/x/net/publicsuffix/table_test.go new file mode 100644 index 000000000..228010cae --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/table_test.go @@ -0,0 +1,16959 @@ +// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +var rules = [...]string{ + "ac", + "com.ac", + "edu.ac", + "gov.ac", + "net.ac", + "mil.ac", + "org.ac", + "ad", + "nom.ad", + "ae", + "co.ae", + "net.ae", + "org.ae", + "sch.ae", + "ac.ae", + "gov.ae", + "mil.ae", + "aero", + "accident-investigation.aero", + "accident-prevention.aero", + "aerobatic.aero", + "aeroclub.aero", + "aerodrome.aero", + "agents.aero", + "aircraft.aero", + "airline.aero", + "airport.aero", + "air-surveillance.aero", + "airtraffic.aero", + "air-traffic-control.aero", + "ambulance.aero", + "amusement.aero", + "association.aero", + "author.aero", + "ballooning.aero", + "broker.aero", + "caa.aero", + "cargo.aero", + "catering.aero", + "certification.aero", + "championship.aero", + "charter.aero", + "civilaviation.aero", + "club.aero", + "conference.aero", + "consultant.aero", + "consulting.aero", + "control.aero", + "council.aero", + "crew.aero", + "design.aero", + "dgca.aero", + "educator.aero", + "emergency.aero", + "engine.aero", + "engineer.aero", + "entertainment.aero", + "equipment.aero", + "exchange.aero", + "express.aero", + "federation.aero", + "flight.aero", + "freight.aero", + "fuel.aero", + "gliding.aero", + "government.aero", + "groundhandling.aero", + "group.aero", + "hanggliding.aero", + "homebuilt.aero", + "insurance.aero", + "journal.aero", + "journalist.aero", + "leasing.aero", + "logistics.aero", + "magazine.aero", + "maintenance.aero", + "media.aero", + "microlight.aero", + "modelling.aero", + "navigation.aero", + "parachuting.aero", + "paragliding.aero", + "passenger-association.aero", + "pilot.aero", + "press.aero", + "production.aero", + "recreation.aero", + "repbody.aero", + "res.aero", + "research.aero", + "rotorcraft.aero", + "safety.aero", + "scientist.aero", + "services.aero", + "show.aero", + "skydiving.aero", + "software.aero", + "student.aero", + "trader.aero", + "trading.aero", + "trainer.aero", + "union.aero", + "workinggroup.aero", + "works.aero", + "af", + "gov.af", + "com.af", + "org.af", + "net.af", + "edu.af", + "ag", + "com.ag", + "org.ag", + "net.ag", + "co.ag", + "nom.ag", + "ai", + "off.ai", + "com.ai", + "net.ai", + "org.ai", + "al", + "com.al", + "edu.al", + "gov.al", + "mil.al", + "net.al", + "org.al", + "am", + "ao", + "ed.ao", + "gv.ao", + "og.ao", + "co.ao", + "pb.ao", + "it.ao", + "aq", + "ar", + "com.ar", + "edu.ar", + "gob.ar", + "gov.ar", + "int.ar", + "mil.ar", + "musica.ar", + "net.ar", + "org.ar", + "tur.ar", + "arpa", + "e164.arpa", + "in-addr.arpa", + "ip6.arpa", + "iris.arpa", + "uri.arpa", + "urn.arpa", + "as", + "gov.as", + "asia", + "at", + "ac.at", + "co.at", + "gv.at", + "or.at", + "au", + "com.au", + "net.au", + "org.au", + "edu.au", + "gov.au", + "asn.au", + "id.au", + "info.au", + "conf.au", + "oz.au", + "act.au", + "nsw.au", + 
"nt.au", + "qld.au", + "sa.au", + "tas.au", + "vic.au", + "wa.au", + "act.edu.au", + "nsw.edu.au", + "nt.edu.au", + "qld.edu.au", + "sa.edu.au", + "tas.edu.au", + "vic.edu.au", + "wa.edu.au", + "qld.gov.au", + "sa.gov.au", + "tas.gov.au", + "vic.gov.au", + "wa.gov.au", + "aw", + "com.aw", + "ax", + "az", + "com.az", + "net.az", + "int.az", + "gov.az", + "org.az", + "edu.az", + "info.az", + "pp.az", + "mil.az", + "name.az", + "pro.az", + "biz.az", + "ba", + "com.ba", + "edu.ba", + "gov.ba", + "mil.ba", + "net.ba", + "org.ba", + "bb", + "biz.bb", + "co.bb", + "com.bb", + "edu.bb", + "gov.bb", + "info.bb", + "net.bb", + "org.bb", + "store.bb", + "tv.bb", + "*.bd", + "be", + "ac.be", + "bf", + "gov.bf", + "bg", + "a.bg", + "b.bg", + "c.bg", + "d.bg", + "e.bg", + "f.bg", + "g.bg", + "h.bg", + "i.bg", + "j.bg", + "k.bg", + "l.bg", + "m.bg", + "n.bg", + "o.bg", + "p.bg", + "q.bg", + "r.bg", + "s.bg", + "t.bg", + "u.bg", + "v.bg", + "w.bg", + "x.bg", + "y.bg", + "z.bg", + "0.bg", + "1.bg", + "2.bg", + "3.bg", + "4.bg", + "5.bg", + "6.bg", + "7.bg", + "8.bg", + "9.bg", + "bh", + "com.bh", + "edu.bh", + "net.bh", + "org.bh", + "gov.bh", + "bi", + "co.bi", + "com.bi", + "edu.bi", + "or.bi", + "org.bi", + "biz", + "bj", + "asso.bj", + "barreau.bj", + "gouv.bj", + "bm", + "com.bm", + "edu.bm", + "gov.bm", + "net.bm", + "org.bm", + "*.bn", + "bo", + "com.bo", + "edu.bo", + "gob.bo", + "int.bo", + "org.bo", + "net.bo", + "mil.bo", + "tv.bo", + "web.bo", + "academia.bo", + "agro.bo", + "arte.bo", + "blog.bo", + "bolivia.bo", + "ciencia.bo", + "cooperativa.bo", + "democracia.bo", + "deporte.bo", + "ecologia.bo", + "economia.bo", + "empresa.bo", + "indigena.bo", + "industria.bo", + "info.bo", + "medicina.bo", + "movimiento.bo", + "musica.bo", + "natural.bo", + "nombre.bo", + "noticias.bo", + "patria.bo", + "politica.bo", + "profesional.bo", + "plurinacional.bo", + "pueblo.bo", + "revista.bo", + "salud.bo", + "tecnologia.bo", + "tksat.bo", + "transporte.bo", + "wiki.bo", + "br", + "9guacu.br", + "abc.br", + "adm.br", + "adv.br", + "agr.br", + "aju.br", + "am.br", + "anani.br", + "aparecida.br", + "arq.br", + "art.br", + "ato.br", + "b.br", + "belem.br", + "bhz.br", + "bio.br", + "blog.br", + "bmd.br", + "boavista.br", + "bsb.br", + "campinagrande.br", + "campinas.br", + "caxias.br", + "cim.br", + "cng.br", + "cnt.br", + "com.br", + "contagem.br", + "coop.br", + "cri.br", + "cuiaba.br", + "curitiba.br", + "def.br", + "ecn.br", + "eco.br", + "edu.br", + "emp.br", + "eng.br", + "esp.br", + "etc.br", + "eti.br", + "far.br", + "feira.br", + "flog.br", + "floripa.br", + "fm.br", + "fnd.br", + "fortal.br", + "fot.br", + "foz.br", + "fst.br", + "g12.br", + "ggf.br", + "goiania.br", + "gov.br", + "ac.gov.br", + "al.gov.br", + "am.gov.br", + "ap.gov.br", + "ba.gov.br", + "ce.gov.br", + "df.gov.br", + "es.gov.br", + "go.gov.br", + "ma.gov.br", + "mg.gov.br", + "ms.gov.br", + "mt.gov.br", + "pa.gov.br", + "pb.gov.br", + "pe.gov.br", + "pi.gov.br", + "pr.gov.br", + "rj.gov.br", + "rn.gov.br", + "ro.gov.br", + "rr.gov.br", + "rs.gov.br", + "sc.gov.br", + "se.gov.br", + "sp.gov.br", + "to.gov.br", + "gru.br", + "imb.br", + "ind.br", + "inf.br", + "jab.br", + "jampa.br", + "jdf.br", + "joinville.br", + "jor.br", + "jus.br", + "leg.br", + "lel.br", + "londrina.br", + "macapa.br", + "maceio.br", + "manaus.br", + "maringa.br", + "mat.br", + "med.br", + "mil.br", + "morena.br", + "mp.br", + "mus.br", + "natal.br", + "net.br", + "niteroi.br", + "*.nom.br", + "not.br", + "ntr.br", + "odo.br", + "org.br", + "osasco.br", + 
"palmas.br", + "poa.br", + "ppg.br", + "pro.br", + "psc.br", + "psi.br", + "pvh.br", + "qsl.br", + "radio.br", + "rec.br", + "recife.br", + "ribeirao.br", + "rio.br", + "riobranco.br", + "riopreto.br", + "salvador.br", + "sampa.br", + "santamaria.br", + "santoandre.br", + "saobernardo.br", + "saogonca.br", + "sjc.br", + "slg.br", + "slz.br", + "sorocaba.br", + "srv.br", + "taxi.br", + "teo.br", + "the.br", + "tmp.br", + "trd.br", + "tur.br", + "tv.br", + "udi.br", + "vet.br", + "vix.br", + "vlog.br", + "wiki.br", + "zlg.br", + "bs", + "com.bs", + "net.bs", + "org.bs", + "edu.bs", + "gov.bs", + "bt", + "com.bt", + "edu.bt", + "gov.bt", + "net.bt", + "org.bt", + "bv", + "bw", + "co.bw", + "org.bw", + "by", + "gov.by", + "mil.by", + "com.by", + "of.by", + "bz", + "com.bz", + "net.bz", + "org.bz", + "edu.bz", + "gov.bz", + "ca", + "ab.ca", + "bc.ca", + "mb.ca", + "nb.ca", + "nf.ca", + "nl.ca", + "ns.ca", + "nt.ca", + "nu.ca", + "on.ca", + "pe.ca", + "qc.ca", + "sk.ca", + "yk.ca", + "gc.ca", + "cat", + "cc", + "cd", + "gov.cd", + "cf", + "cg", + "ch", + "ci", + "org.ci", + "or.ci", + "com.ci", + "co.ci", + "edu.ci", + "ed.ci", + "ac.ci", + "net.ci", + "go.ci", + "asso.ci", + "xn--aroport-bya.ci", + "int.ci", + "presse.ci", + "md.ci", + "gouv.ci", + "*.ck", + "!www.ck", + "cl", + "gov.cl", + "gob.cl", + "co.cl", + "mil.cl", + "cm", + "co.cm", + "com.cm", + "gov.cm", + "net.cm", + "cn", + "ac.cn", + "com.cn", + "edu.cn", + "gov.cn", + "net.cn", + "org.cn", + "mil.cn", + "xn--55qx5d.cn", + "xn--io0a7i.cn", + "xn--od0alg.cn", + "ah.cn", + "bj.cn", + "cq.cn", + "fj.cn", + "gd.cn", + "gs.cn", + "gz.cn", + "gx.cn", + "ha.cn", + "hb.cn", + "he.cn", + "hi.cn", + "hl.cn", + "hn.cn", + "jl.cn", + "js.cn", + "jx.cn", + "ln.cn", + "nm.cn", + "nx.cn", + "qh.cn", + "sc.cn", + "sd.cn", + "sh.cn", + "sn.cn", + "sx.cn", + "tj.cn", + "xj.cn", + "xz.cn", + "yn.cn", + "zj.cn", + "hk.cn", + "mo.cn", + "tw.cn", + "co", + "arts.co", + "com.co", + "edu.co", + "firm.co", + "gov.co", + "info.co", + "int.co", + "mil.co", + "net.co", + "nom.co", + "org.co", + "rec.co", + "web.co", + "com", + "coop", + "cr", + "ac.cr", + "co.cr", + "ed.cr", + "fi.cr", + "go.cr", + "or.cr", + "sa.cr", + "cu", + "com.cu", + "edu.cu", + "org.cu", + "net.cu", + "gov.cu", + "inf.cu", + "cv", + "cw", + "com.cw", + "edu.cw", + "net.cw", + "org.cw", + "cx", + "gov.cx", + "cy", + "ac.cy", + "biz.cy", + "com.cy", + "ekloges.cy", + "gov.cy", + "ltd.cy", + "name.cy", + "net.cy", + "org.cy", + "parliament.cy", + "press.cy", + "pro.cy", + "tm.cy", + "cz", + "de", + "dj", + "dk", + "dm", + "com.dm", + "net.dm", + "org.dm", + "edu.dm", + "gov.dm", + "do", + "art.do", + "com.do", + "edu.do", + "gob.do", + "gov.do", + "mil.do", + "net.do", + "org.do", + "sld.do", + "web.do", + "dz", + "com.dz", + "org.dz", + "net.dz", + "gov.dz", + "edu.dz", + "asso.dz", + "pol.dz", + "art.dz", + "ec", + "com.ec", + "info.ec", + "net.ec", + "fin.ec", + "k12.ec", + "med.ec", + "pro.ec", + "org.ec", + "edu.ec", + "gov.ec", + "gob.ec", + "mil.ec", + "edu", + "ee", + "edu.ee", + "gov.ee", + "riik.ee", + "lib.ee", + "med.ee", + "com.ee", + "pri.ee", + "aip.ee", + "org.ee", + "fie.ee", + "eg", + "com.eg", + "edu.eg", + "eun.eg", + "gov.eg", + "mil.eg", + "name.eg", + "net.eg", + "org.eg", + "sci.eg", + "*.er", + "es", + "com.es", + "nom.es", + "org.es", + "gob.es", + "edu.es", + "et", + "com.et", + "gov.et", + "org.et", + "edu.et", + "biz.et", + "name.et", + "info.et", + "net.et", + "eu", + "fi", + "aland.fi", + "*.fj", + "*.fk", + "fm", + "fo", + "fr", + "com.fr", + "asso.fr", + 
"nom.fr", + "prd.fr", + "presse.fr", + "tm.fr", + "aeroport.fr", + "assedic.fr", + "avocat.fr", + "avoues.fr", + "cci.fr", + "chambagri.fr", + "chirurgiens-dentistes.fr", + "experts-comptables.fr", + "geometre-expert.fr", + "gouv.fr", + "greta.fr", + "huissier-justice.fr", + "medecin.fr", + "notaires.fr", + "pharmacien.fr", + "port.fr", + "veterinaire.fr", + "ga", + "gb", + "gd", + "ge", + "com.ge", + "edu.ge", + "gov.ge", + "org.ge", + "mil.ge", + "net.ge", + "pvt.ge", + "gf", + "gg", + "co.gg", + "net.gg", + "org.gg", + "gh", + "com.gh", + "edu.gh", + "gov.gh", + "org.gh", + "mil.gh", + "gi", + "com.gi", + "ltd.gi", + "gov.gi", + "mod.gi", + "edu.gi", + "org.gi", + "gl", + "co.gl", + "com.gl", + "edu.gl", + "net.gl", + "org.gl", + "gm", + "gn", + "ac.gn", + "com.gn", + "edu.gn", + "gov.gn", + "org.gn", + "net.gn", + "gov", + "gp", + "com.gp", + "net.gp", + "mobi.gp", + "edu.gp", + "org.gp", + "asso.gp", + "gq", + "gr", + "com.gr", + "edu.gr", + "net.gr", + "org.gr", + "gov.gr", + "gs", + "gt", + "com.gt", + "edu.gt", + "gob.gt", + "ind.gt", + "mil.gt", + "net.gt", + "org.gt", + "*.gu", + "gw", + "gy", + "co.gy", + "com.gy", + "edu.gy", + "gov.gy", + "net.gy", + "org.gy", + "hk", + "com.hk", + "edu.hk", + "gov.hk", + "idv.hk", + "net.hk", + "org.hk", + "xn--55qx5d.hk", + "xn--wcvs22d.hk", + "xn--lcvr32d.hk", + "xn--mxtq1m.hk", + "xn--gmqw5a.hk", + "xn--ciqpn.hk", + "xn--gmq050i.hk", + "xn--zf0avx.hk", + "xn--io0a7i.hk", + "xn--mk0axi.hk", + "xn--od0alg.hk", + "xn--od0aq3b.hk", + "xn--tn0ag.hk", + "xn--uc0atv.hk", + "xn--uc0ay4a.hk", + "hm", + "hn", + "com.hn", + "edu.hn", + "org.hn", + "net.hn", + "mil.hn", + "gob.hn", + "hr", + "iz.hr", + "from.hr", + "name.hr", + "com.hr", + "ht", + "com.ht", + "shop.ht", + "firm.ht", + "info.ht", + "adult.ht", + "net.ht", + "pro.ht", + "org.ht", + "med.ht", + "art.ht", + "coop.ht", + "pol.ht", + "asso.ht", + "edu.ht", + "rel.ht", + "gouv.ht", + "perso.ht", + "hu", + "co.hu", + "info.hu", + "org.hu", + "priv.hu", + "sport.hu", + "tm.hu", + "2000.hu", + "agrar.hu", + "bolt.hu", + "casino.hu", + "city.hu", + "erotica.hu", + "erotika.hu", + "film.hu", + "forum.hu", + "games.hu", + "hotel.hu", + "ingatlan.hu", + "jogasz.hu", + "konyvelo.hu", + "lakas.hu", + "media.hu", + "news.hu", + "reklam.hu", + "sex.hu", + "shop.hu", + "suli.hu", + "szex.hu", + "tozsde.hu", + "utazas.hu", + "video.hu", + "id", + "ac.id", + "biz.id", + "co.id", + "desa.id", + "go.id", + "mil.id", + "my.id", + "net.id", + "or.id", + "sch.id", + "web.id", + "ie", + "gov.ie", + "il", + "ac.il", + "co.il", + "gov.il", + "idf.il", + "k12.il", + "muni.il", + "net.il", + "org.il", + "im", + "ac.im", + "co.im", + "com.im", + "ltd.co.im", + "net.im", + "org.im", + "plc.co.im", + "tt.im", + "tv.im", + "in", + "co.in", + "firm.in", + "net.in", + "org.in", + "gen.in", + "ind.in", + "nic.in", + "ac.in", + "edu.in", + "res.in", + "gov.in", + "mil.in", + "info", + "int", + "eu.int", + "io", + "com.io", + "iq", + "gov.iq", + "edu.iq", + "mil.iq", + "com.iq", + "org.iq", + "net.iq", + "ir", + "ac.ir", + "co.ir", + "gov.ir", + "id.ir", + "net.ir", + "org.ir", + "sch.ir", + "xn--mgba3a4f16a.ir", + "xn--mgba3a4fra.ir", + "is", + "net.is", + "com.is", + "edu.is", + "gov.is", + "org.is", + "int.is", + "it", + "gov.it", + "edu.it", + "abr.it", + "abruzzo.it", + "aosta-valley.it", + "aostavalley.it", + "bas.it", + "basilicata.it", + "cal.it", + "calabria.it", + "cam.it", + "campania.it", + "emilia-romagna.it", + "emiliaromagna.it", + "emr.it", + "friuli-v-giulia.it", + "friuli-ve-giulia.it", + 
"friuli-vegiulia.it", + "friuli-venezia-giulia.it", + "friuli-veneziagiulia.it", + "friuli-vgiulia.it", + "friuliv-giulia.it", + "friulive-giulia.it", + "friulivegiulia.it", + "friulivenezia-giulia.it", + "friuliveneziagiulia.it", + "friulivgiulia.it", + "fvg.it", + "laz.it", + "lazio.it", + "lig.it", + "liguria.it", + "lom.it", + "lombardia.it", + "lombardy.it", + "lucania.it", + "mar.it", + "marche.it", + "mol.it", + "molise.it", + "piedmont.it", + "piemonte.it", + "pmn.it", + "pug.it", + "puglia.it", + "sar.it", + "sardegna.it", + "sardinia.it", + "sic.it", + "sicilia.it", + "sicily.it", + "taa.it", + "tos.it", + "toscana.it", + "trentino-a-adige.it", + "trentino-aadige.it", + "trentino-alto-adige.it", + "trentino-altoadige.it", + "trentino-s-tirol.it", + "trentino-stirol.it", + "trentino-sud-tirol.it", + "trentino-sudtirol.it", + "trentino-sued-tirol.it", + "trentino-suedtirol.it", + "trentinoa-adige.it", + "trentinoaadige.it", + "trentinoalto-adige.it", + "trentinoaltoadige.it", + "trentinos-tirol.it", + "trentinostirol.it", + "trentinosud-tirol.it", + "trentinosudtirol.it", + "trentinosued-tirol.it", + "trentinosuedtirol.it", + "tuscany.it", + "umb.it", + "umbria.it", + "val-d-aosta.it", + "val-daosta.it", + "vald-aosta.it", + "valdaosta.it", + "valle-aosta.it", + "valle-d-aosta.it", + "valle-daosta.it", + "valleaosta.it", + "valled-aosta.it", + "valledaosta.it", + "vallee-aoste.it", + "valleeaoste.it", + "vao.it", + "vda.it", + "ven.it", + "veneto.it", + "ag.it", + "agrigento.it", + "al.it", + "alessandria.it", + "alto-adige.it", + "altoadige.it", + "an.it", + "ancona.it", + "andria-barletta-trani.it", + "andria-trani-barletta.it", + "andriabarlettatrani.it", + "andriatranibarletta.it", + "ao.it", + "aosta.it", + "aoste.it", + "ap.it", + "aq.it", + "aquila.it", + "ar.it", + "arezzo.it", + "ascoli-piceno.it", + "ascolipiceno.it", + "asti.it", + "at.it", + "av.it", + "avellino.it", + "ba.it", + "balsan.it", + "bari.it", + "barletta-trani-andria.it", + "barlettatraniandria.it", + "belluno.it", + "benevento.it", + "bergamo.it", + "bg.it", + "bi.it", + "biella.it", + "bl.it", + "bn.it", + "bo.it", + "bologna.it", + "bolzano.it", + "bozen.it", + "br.it", + "brescia.it", + "brindisi.it", + "bs.it", + "bt.it", + "bz.it", + "ca.it", + "cagliari.it", + "caltanissetta.it", + "campidano-medio.it", + "campidanomedio.it", + "campobasso.it", + "carbonia-iglesias.it", + "carboniaiglesias.it", + "carrara-massa.it", + "carraramassa.it", + "caserta.it", + "catania.it", + "catanzaro.it", + "cb.it", + "ce.it", + "cesena-forli.it", + "cesenaforli.it", + "ch.it", + "chieti.it", + "ci.it", + "cl.it", + "cn.it", + "co.it", + "como.it", + "cosenza.it", + "cr.it", + "cremona.it", + "crotone.it", + "cs.it", + "ct.it", + "cuneo.it", + "cz.it", + "dell-ogliastra.it", + "dellogliastra.it", + "en.it", + "enna.it", + "fc.it", + "fe.it", + "fermo.it", + "ferrara.it", + "fg.it", + "fi.it", + "firenze.it", + "florence.it", + "fm.it", + "foggia.it", + "forli-cesena.it", + "forlicesena.it", + "fr.it", + "frosinone.it", + "ge.it", + "genoa.it", + "genova.it", + "go.it", + "gorizia.it", + "gr.it", + "grosseto.it", + "iglesias-carbonia.it", + "iglesiascarbonia.it", + "im.it", + "imperia.it", + "is.it", + "isernia.it", + "kr.it", + "la-spezia.it", + "laquila.it", + "laspezia.it", + "latina.it", + "lc.it", + "le.it", + "lecce.it", + "lecco.it", + "li.it", + "livorno.it", + "lo.it", + "lodi.it", + "lt.it", + "lu.it", + "lucca.it", + "macerata.it", + "mantova.it", + "massa-carrara.it", + "massacarrara.it", + "matera.it", + 
"mb.it", + "mc.it", + "me.it", + "medio-campidano.it", + "mediocampidano.it", + "messina.it", + "mi.it", + "milan.it", + "milano.it", + "mn.it", + "mo.it", + "modena.it", + "monza-brianza.it", + "monza-e-della-brianza.it", + "monza.it", + "monzabrianza.it", + "monzaebrianza.it", + "monzaedellabrianza.it", + "ms.it", + "mt.it", + "na.it", + "naples.it", + "napoli.it", + "no.it", + "novara.it", + "nu.it", + "nuoro.it", + "og.it", + "ogliastra.it", + "olbia-tempio.it", + "olbiatempio.it", + "or.it", + "oristano.it", + "ot.it", + "pa.it", + "padova.it", + "padua.it", + "palermo.it", + "parma.it", + "pavia.it", + "pc.it", + "pd.it", + "pe.it", + "perugia.it", + "pesaro-urbino.it", + "pesarourbino.it", + "pescara.it", + "pg.it", + "pi.it", + "piacenza.it", + "pisa.it", + "pistoia.it", + "pn.it", + "po.it", + "pordenone.it", + "potenza.it", + "pr.it", + "prato.it", + "pt.it", + "pu.it", + "pv.it", + "pz.it", + "ra.it", + "ragusa.it", + "ravenna.it", + "rc.it", + "re.it", + "reggio-calabria.it", + "reggio-emilia.it", + "reggiocalabria.it", + "reggioemilia.it", + "rg.it", + "ri.it", + "rieti.it", + "rimini.it", + "rm.it", + "rn.it", + "ro.it", + "roma.it", + "rome.it", + "rovigo.it", + "sa.it", + "salerno.it", + "sassari.it", + "savona.it", + "si.it", + "siena.it", + "siracusa.it", + "so.it", + "sondrio.it", + "sp.it", + "sr.it", + "ss.it", + "suedtirol.it", + "sv.it", + "ta.it", + "taranto.it", + "te.it", + "tempio-olbia.it", + "tempioolbia.it", + "teramo.it", + "terni.it", + "tn.it", + "to.it", + "torino.it", + "tp.it", + "tr.it", + "trani-andria-barletta.it", + "trani-barletta-andria.it", + "traniandriabarletta.it", + "tranibarlettaandria.it", + "trapani.it", + "trentino.it", + "trento.it", + "treviso.it", + "trieste.it", + "ts.it", + "turin.it", + "tv.it", + "ud.it", + "udine.it", + "urbino-pesaro.it", + "urbinopesaro.it", + "va.it", + "varese.it", + "vb.it", + "vc.it", + "ve.it", + "venezia.it", + "venice.it", + "verbania.it", + "vercelli.it", + "verona.it", + "vi.it", + "vibo-valentia.it", + "vibovalentia.it", + "vicenza.it", + "viterbo.it", + "vr.it", + "vs.it", + "vt.it", + "vv.it", + "je", + "co.je", + "net.je", + "org.je", + "*.jm", + "jo", + "com.jo", + "org.jo", + "net.jo", + "edu.jo", + "sch.jo", + "gov.jo", + "mil.jo", + "name.jo", + "jobs", + "jp", + "ac.jp", + "ad.jp", + "co.jp", + "ed.jp", + "go.jp", + "gr.jp", + "lg.jp", + "ne.jp", + "or.jp", + "aichi.jp", + "akita.jp", + "aomori.jp", + "chiba.jp", + "ehime.jp", + "fukui.jp", + "fukuoka.jp", + "fukushima.jp", + "gifu.jp", + "gunma.jp", + "hiroshima.jp", + "hokkaido.jp", + "hyogo.jp", + "ibaraki.jp", + "ishikawa.jp", + "iwate.jp", + "kagawa.jp", + "kagoshima.jp", + "kanagawa.jp", + "kochi.jp", + "kumamoto.jp", + "kyoto.jp", + "mie.jp", + "miyagi.jp", + "miyazaki.jp", + "nagano.jp", + "nagasaki.jp", + "nara.jp", + "niigata.jp", + "oita.jp", + "okayama.jp", + "okinawa.jp", + "osaka.jp", + "saga.jp", + "saitama.jp", + "shiga.jp", + "shimane.jp", + "shizuoka.jp", + "tochigi.jp", + "tokushima.jp", + "tokyo.jp", + "tottori.jp", + "toyama.jp", + "wakayama.jp", + "yamagata.jp", + "yamaguchi.jp", + "yamanashi.jp", + "xn--4pvxs.jp", + "xn--vgu402c.jp", + "xn--c3s14m.jp", + "xn--f6qx53a.jp", + "xn--8pvr4u.jp", + "xn--uist22h.jp", + "xn--djrs72d6uy.jp", + "xn--mkru45i.jp", + "xn--0trq7p7nn.jp", + "xn--8ltr62k.jp", + "xn--2m4a15e.jp", + "xn--efvn9s.jp", + "xn--32vp30h.jp", + "xn--4it797k.jp", + "xn--1lqs71d.jp", + "xn--5rtp49c.jp", + "xn--5js045d.jp", + "xn--ehqz56n.jp", + "xn--1lqs03n.jp", + "xn--qqqt11m.jp", + "xn--kbrq7o.jp", + 
"xn--pssu33l.jp", + "xn--ntsq17g.jp", + "xn--uisz3g.jp", + "xn--6btw5a.jp", + "xn--1ctwo.jp", + "xn--6orx2r.jp", + "xn--rht61e.jp", + "xn--rht27z.jp", + "xn--djty4k.jp", + "xn--nit225k.jp", + "xn--rht3d.jp", + "xn--klty5x.jp", + "xn--kltx9a.jp", + "xn--kltp7d.jp", + "xn--uuwu58a.jp", + "xn--zbx025d.jp", + "xn--ntso0iqx3a.jp", + "xn--elqq16h.jp", + "xn--4it168d.jp", + "xn--klt787d.jp", + "xn--rny31h.jp", + "xn--7t0a264c.jp", + "xn--5rtq34k.jp", + "xn--k7yn95e.jp", + "xn--tor131o.jp", + "xn--d5qv7z876c.jp", + "*.kawasaki.jp", + "*.kitakyushu.jp", + "*.kobe.jp", + "*.nagoya.jp", + "*.sapporo.jp", + "*.sendai.jp", + "*.yokohama.jp", + "!city.kawasaki.jp", + "!city.kitakyushu.jp", + "!city.kobe.jp", + "!city.nagoya.jp", + "!city.sapporo.jp", + "!city.sendai.jp", + "!city.yokohama.jp", + "aisai.aichi.jp", + "ama.aichi.jp", + "anjo.aichi.jp", + "asuke.aichi.jp", + "chiryu.aichi.jp", + "chita.aichi.jp", + "fuso.aichi.jp", + "gamagori.aichi.jp", + "handa.aichi.jp", + "hazu.aichi.jp", + "hekinan.aichi.jp", + "higashiura.aichi.jp", + "ichinomiya.aichi.jp", + "inazawa.aichi.jp", + "inuyama.aichi.jp", + "isshiki.aichi.jp", + "iwakura.aichi.jp", + "kanie.aichi.jp", + "kariya.aichi.jp", + "kasugai.aichi.jp", + "kira.aichi.jp", + "kiyosu.aichi.jp", + "komaki.aichi.jp", + "konan.aichi.jp", + "kota.aichi.jp", + "mihama.aichi.jp", + "miyoshi.aichi.jp", + "nishio.aichi.jp", + "nisshin.aichi.jp", + "obu.aichi.jp", + "oguchi.aichi.jp", + "oharu.aichi.jp", + "okazaki.aichi.jp", + "owariasahi.aichi.jp", + "seto.aichi.jp", + "shikatsu.aichi.jp", + "shinshiro.aichi.jp", + "shitara.aichi.jp", + "tahara.aichi.jp", + "takahama.aichi.jp", + "tobishima.aichi.jp", + "toei.aichi.jp", + "togo.aichi.jp", + "tokai.aichi.jp", + "tokoname.aichi.jp", + "toyoake.aichi.jp", + "toyohashi.aichi.jp", + "toyokawa.aichi.jp", + "toyone.aichi.jp", + "toyota.aichi.jp", + "tsushima.aichi.jp", + "yatomi.aichi.jp", + "akita.akita.jp", + "daisen.akita.jp", + "fujisato.akita.jp", + "gojome.akita.jp", + "hachirogata.akita.jp", + "happou.akita.jp", + "higashinaruse.akita.jp", + "honjo.akita.jp", + "honjyo.akita.jp", + "ikawa.akita.jp", + "kamikoani.akita.jp", + "kamioka.akita.jp", + "katagami.akita.jp", + "kazuno.akita.jp", + "kitaakita.akita.jp", + "kosaka.akita.jp", + "kyowa.akita.jp", + "misato.akita.jp", + "mitane.akita.jp", + "moriyoshi.akita.jp", + "nikaho.akita.jp", + "noshiro.akita.jp", + "odate.akita.jp", + "oga.akita.jp", + "ogata.akita.jp", + "semboku.akita.jp", + "yokote.akita.jp", + "yurihonjo.akita.jp", + "aomori.aomori.jp", + "gonohe.aomori.jp", + "hachinohe.aomori.jp", + "hashikami.aomori.jp", + "hiranai.aomori.jp", + "hirosaki.aomori.jp", + "itayanagi.aomori.jp", + "kuroishi.aomori.jp", + "misawa.aomori.jp", + "mutsu.aomori.jp", + "nakadomari.aomori.jp", + "noheji.aomori.jp", + "oirase.aomori.jp", + "owani.aomori.jp", + "rokunohe.aomori.jp", + "sannohe.aomori.jp", + "shichinohe.aomori.jp", + "shingo.aomori.jp", + "takko.aomori.jp", + "towada.aomori.jp", + "tsugaru.aomori.jp", + "tsuruta.aomori.jp", + "abiko.chiba.jp", + "asahi.chiba.jp", + "chonan.chiba.jp", + "chosei.chiba.jp", + "choshi.chiba.jp", + "chuo.chiba.jp", + "funabashi.chiba.jp", + "futtsu.chiba.jp", + "hanamigawa.chiba.jp", + "ichihara.chiba.jp", + "ichikawa.chiba.jp", + "ichinomiya.chiba.jp", + "inzai.chiba.jp", + "isumi.chiba.jp", + "kamagaya.chiba.jp", + "kamogawa.chiba.jp", + "kashiwa.chiba.jp", + "katori.chiba.jp", + "katsuura.chiba.jp", + "kimitsu.chiba.jp", + "kisarazu.chiba.jp", + "kozaki.chiba.jp", + "kujukuri.chiba.jp", + "kyonan.chiba.jp", + 
"matsudo.chiba.jp", + "midori.chiba.jp", + "mihama.chiba.jp", + "minamiboso.chiba.jp", + "mobara.chiba.jp", + "mutsuzawa.chiba.jp", + "nagara.chiba.jp", + "nagareyama.chiba.jp", + "narashino.chiba.jp", + "narita.chiba.jp", + "noda.chiba.jp", + "oamishirasato.chiba.jp", + "omigawa.chiba.jp", + "onjuku.chiba.jp", + "otaki.chiba.jp", + "sakae.chiba.jp", + "sakura.chiba.jp", + "shimofusa.chiba.jp", + "shirako.chiba.jp", + "shiroi.chiba.jp", + "shisui.chiba.jp", + "sodegaura.chiba.jp", + "sosa.chiba.jp", + "tako.chiba.jp", + "tateyama.chiba.jp", + "togane.chiba.jp", + "tohnosho.chiba.jp", + "tomisato.chiba.jp", + "urayasu.chiba.jp", + "yachimata.chiba.jp", + "yachiyo.chiba.jp", + "yokaichiba.chiba.jp", + "yokoshibahikari.chiba.jp", + "yotsukaido.chiba.jp", + "ainan.ehime.jp", + "honai.ehime.jp", + "ikata.ehime.jp", + "imabari.ehime.jp", + "iyo.ehime.jp", + "kamijima.ehime.jp", + "kihoku.ehime.jp", + "kumakogen.ehime.jp", + "masaki.ehime.jp", + "matsuno.ehime.jp", + "matsuyama.ehime.jp", + "namikata.ehime.jp", + "niihama.ehime.jp", + "ozu.ehime.jp", + "saijo.ehime.jp", + "seiyo.ehime.jp", + "shikokuchuo.ehime.jp", + "tobe.ehime.jp", + "toon.ehime.jp", + "uchiko.ehime.jp", + "uwajima.ehime.jp", + "yawatahama.ehime.jp", + "echizen.fukui.jp", + "eiheiji.fukui.jp", + "fukui.fukui.jp", + "ikeda.fukui.jp", + "katsuyama.fukui.jp", + "mihama.fukui.jp", + "minamiechizen.fukui.jp", + "obama.fukui.jp", + "ohi.fukui.jp", + "ono.fukui.jp", + "sabae.fukui.jp", + "sakai.fukui.jp", + "takahama.fukui.jp", + "tsuruga.fukui.jp", + "wakasa.fukui.jp", + "ashiya.fukuoka.jp", + "buzen.fukuoka.jp", + "chikugo.fukuoka.jp", + "chikuho.fukuoka.jp", + "chikujo.fukuoka.jp", + "chikushino.fukuoka.jp", + "chikuzen.fukuoka.jp", + "chuo.fukuoka.jp", + "dazaifu.fukuoka.jp", + "fukuchi.fukuoka.jp", + "hakata.fukuoka.jp", + "higashi.fukuoka.jp", + "hirokawa.fukuoka.jp", + "hisayama.fukuoka.jp", + "iizuka.fukuoka.jp", + "inatsuki.fukuoka.jp", + "kaho.fukuoka.jp", + "kasuga.fukuoka.jp", + "kasuya.fukuoka.jp", + "kawara.fukuoka.jp", + "keisen.fukuoka.jp", + "koga.fukuoka.jp", + "kurate.fukuoka.jp", + "kurogi.fukuoka.jp", + "kurume.fukuoka.jp", + "minami.fukuoka.jp", + "miyako.fukuoka.jp", + "miyama.fukuoka.jp", + "miyawaka.fukuoka.jp", + "mizumaki.fukuoka.jp", + "munakata.fukuoka.jp", + "nakagawa.fukuoka.jp", + "nakama.fukuoka.jp", + "nishi.fukuoka.jp", + "nogata.fukuoka.jp", + "ogori.fukuoka.jp", + "okagaki.fukuoka.jp", + "okawa.fukuoka.jp", + "oki.fukuoka.jp", + "omuta.fukuoka.jp", + "onga.fukuoka.jp", + "onojo.fukuoka.jp", + "oto.fukuoka.jp", + "saigawa.fukuoka.jp", + "sasaguri.fukuoka.jp", + "shingu.fukuoka.jp", + "shinyoshitomi.fukuoka.jp", + "shonai.fukuoka.jp", + "soeda.fukuoka.jp", + "sue.fukuoka.jp", + "tachiarai.fukuoka.jp", + "tagawa.fukuoka.jp", + "takata.fukuoka.jp", + "toho.fukuoka.jp", + "toyotsu.fukuoka.jp", + "tsuiki.fukuoka.jp", + "ukiha.fukuoka.jp", + "umi.fukuoka.jp", + "usui.fukuoka.jp", + "yamada.fukuoka.jp", + "yame.fukuoka.jp", + "yanagawa.fukuoka.jp", + "yukuhashi.fukuoka.jp", + "aizubange.fukushima.jp", + "aizumisato.fukushima.jp", + "aizuwakamatsu.fukushima.jp", + "asakawa.fukushima.jp", + "bandai.fukushima.jp", + "date.fukushima.jp", + "fukushima.fukushima.jp", + "furudono.fukushima.jp", + "futaba.fukushima.jp", + "hanawa.fukushima.jp", + "higashi.fukushima.jp", + "hirata.fukushima.jp", + "hirono.fukushima.jp", + "iitate.fukushima.jp", + "inawashiro.fukushima.jp", + "ishikawa.fukushima.jp", + "iwaki.fukushima.jp", + "izumizaki.fukushima.jp", + "kagamiishi.fukushima.jp", + "kaneyama.fukushima.jp", + 
"kawamata.fukushima.jp", + "kitakata.fukushima.jp", + "kitashiobara.fukushima.jp", + "koori.fukushima.jp", + "koriyama.fukushima.jp", + "kunimi.fukushima.jp", + "miharu.fukushima.jp", + "mishima.fukushima.jp", + "namie.fukushima.jp", + "nango.fukushima.jp", + "nishiaizu.fukushima.jp", + "nishigo.fukushima.jp", + "okuma.fukushima.jp", + "omotego.fukushima.jp", + "ono.fukushima.jp", + "otama.fukushima.jp", + "samegawa.fukushima.jp", + "shimogo.fukushima.jp", + "shirakawa.fukushima.jp", + "showa.fukushima.jp", + "soma.fukushima.jp", + "sukagawa.fukushima.jp", + "taishin.fukushima.jp", + "tamakawa.fukushima.jp", + "tanagura.fukushima.jp", + "tenei.fukushima.jp", + "yabuki.fukushima.jp", + "yamato.fukushima.jp", + "yamatsuri.fukushima.jp", + "yanaizu.fukushima.jp", + "yugawa.fukushima.jp", + "anpachi.gifu.jp", + "ena.gifu.jp", + "gifu.gifu.jp", + "ginan.gifu.jp", + "godo.gifu.jp", + "gujo.gifu.jp", + "hashima.gifu.jp", + "hichiso.gifu.jp", + "hida.gifu.jp", + "higashishirakawa.gifu.jp", + "ibigawa.gifu.jp", + "ikeda.gifu.jp", + "kakamigahara.gifu.jp", + "kani.gifu.jp", + "kasahara.gifu.jp", + "kasamatsu.gifu.jp", + "kawaue.gifu.jp", + "kitagata.gifu.jp", + "mino.gifu.jp", + "minokamo.gifu.jp", + "mitake.gifu.jp", + "mizunami.gifu.jp", + "motosu.gifu.jp", + "nakatsugawa.gifu.jp", + "ogaki.gifu.jp", + "sakahogi.gifu.jp", + "seki.gifu.jp", + "sekigahara.gifu.jp", + "shirakawa.gifu.jp", + "tajimi.gifu.jp", + "takayama.gifu.jp", + "tarui.gifu.jp", + "toki.gifu.jp", + "tomika.gifu.jp", + "wanouchi.gifu.jp", + "yamagata.gifu.jp", + "yaotsu.gifu.jp", + "yoro.gifu.jp", + "annaka.gunma.jp", + "chiyoda.gunma.jp", + "fujioka.gunma.jp", + "higashiagatsuma.gunma.jp", + "isesaki.gunma.jp", + "itakura.gunma.jp", + "kanna.gunma.jp", + "kanra.gunma.jp", + "katashina.gunma.jp", + "kawaba.gunma.jp", + "kiryu.gunma.jp", + "kusatsu.gunma.jp", + "maebashi.gunma.jp", + "meiwa.gunma.jp", + "midori.gunma.jp", + "minakami.gunma.jp", + "naganohara.gunma.jp", + "nakanojo.gunma.jp", + "nanmoku.gunma.jp", + "numata.gunma.jp", + "oizumi.gunma.jp", + "ora.gunma.jp", + "ota.gunma.jp", + "shibukawa.gunma.jp", + "shimonita.gunma.jp", + "shinto.gunma.jp", + "showa.gunma.jp", + "takasaki.gunma.jp", + "takayama.gunma.jp", + "tamamura.gunma.jp", + "tatebayashi.gunma.jp", + "tomioka.gunma.jp", + "tsukiyono.gunma.jp", + "tsumagoi.gunma.jp", + "ueno.gunma.jp", + "yoshioka.gunma.jp", + "asaminami.hiroshima.jp", + "daiwa.hiroshima.jp", + "etajima.hiroshima.jp", + "fuchu.hiroshima.jp", + "fukuyama.hiroshima.jp", + "hatsukaichi.hiroshima.jp", + "higashihiroshima.hiroshima.jp", + "hongo.hiroshima.jp", + "jinsekikogen.hiroshima.jp", + "kaita.hiroshima.jp", + "kui.hiroshima.jp", + "kumano.hiroshima.jp", + "kure.hiroshima.jp", + "mihara.hiroshima.jp", + "miyoshi.hiroshima.jp", + "naka.hiroshima.jp", + "onomichi.hiroshima.jp", + "osakikamijima.hiroshima.jp", + "otake.hiroshima.jp", + "saka.hiroshima.jp", + "sera.hiroshima.jp", + "seranishi.hiroshima.jp", + "shinichi.hiroshima.jp", + "shobara.hiroshima.jp", + "takehara.hiroshima.jp", + "abashiri.hokkaido.jp", + "abira.hokkaido.jp", + "aibetsu.hokkaido.jp", + "akabira.hokkaido.jp", + "akkeshi.hokkaido.jp", + "asahikawa.hokkaido.jp", + "ashibetsu.hokkaido.jp", + "ashoro.hokkaido.jp", + "assabu.hokkaido.jp", + "atsuma.hokkaido.jp", + "bibai.hokkaido.jp", + "biei.hokkaido.jp", + "bifuka.hokkaido.jp", + "bihoro.hokkaido.jp", + "biratori.hokkaido.jp", + "chippubetsu.hokkaido.jp", + "chitose.hokkaido.jp", + "date.hokkaido.jp", + "ebetsu.hokkaido.jp", + "embetsu.hokkaido.jp", + "eniwa.hokkaido.jp", + 
"erimo.hokkaido.jp", + "esan.hokkaido.jp", + "esashi.hokkaido.jp", + "fukagawa.hokkaido.jp", + "fukushima.hokkaido.jp", + "furano.hokkaido.jp", + "furubira.hokkaido.jp", + "haboro.hokkaido.jp", + "hakodate.hokkaido.jp", + "hamatonbetsu.hokkaido.jp", + "hidaka.hokkaido.jp", + "higashikagura.hokkaido.jp", + "higashikawa.hokkaido.jp", + "hiroo.hokkaido.jp", + "hokuryu.hokkaido.jp", + "hokuto.hokkaido.jp", + "honbetsu.hokkaido.jp", + "horokanai.hokkaido.jp", + "horonobe.hokkaido.jp", + "ikeda.hokkaido.jp", + "imakane.hokkaido.jp", + "ishikari.hokkaido.jp", + "iwamizawa.hokkaido.jp", + "iwanai.hokkaido.jp", + "kamifurano.hokkaido.jp", + "kamikawa.hokkaido.jp", + "kamishihoro.hokkaido.jp", + "kamisunagawa.hokkaido.jp", + "kamoenai.hokkaido.jp", + "kayabe.hokkaido.jp", + "kembuchi.hokkaido.jp", + "kikonai.hokkaido.jp", + "kimobetsu.hokkaido.jp", + "kitahiroshima.hokkaido.jp", + "kitami.hokkaido.jp", + "kiyosato.hokkaido.jp", + "koshimizu.hokkaido.jp", + "kunneppu.hokkaido.jp", + "kuriyama.hokkaido.jp", + "kuromatsunai.hokkaido.jp", + "kushiro.hokkaido.jp", + "kutchan.hokkaido.jp", + "kyowa.hokkaido.jp", + "mashike.hokkaido.jp", + "matsumae.hokkaido.jp", + "mikasa.hokkaido.jp", + "minamifurano.hokkaido.jp", + "mombetsu.hokkaido.jp", + "moseushi.hokkaido.jp", + "mukawa.hokkaido.jp", + "muroran.hokkaido.jp", + "naie.hokkaido.jp", + "nakagawa.hokkaido.jp", + "nakasatsunai.hokkaido.jp", + "nakatombetsu.hokkaido.jp", + "nanae.hokkaido.jp", + "nanporo.hokkaido.jp", + "nayoro.hokkaido.jp", + "nemuro.hokkaido.jp", + "niikappu.hokkaido.jp", + "niki.hokkaido.jp", + "nishiokoppe.hokkaido.jp", + "noboribetsu.hokkaido.jp", + "numata.hokkaido.jp", + "obihiro.hokkaido.jp", + "obira.hokkaido.jp", + "oketo.hokkaido.jp", + "okoppe.hokkaido.jp", + "otaru.hokkaido.jp", + "otobe.hokkaido.jp", + "otofuke.hokkaido.jp", + "otoineppu.hokkaido.jp", + "oumu.hokkaido.jp", + "ozora.hokkaido.jp", + "pippu.hokkaido.jp", + "rankoshi.hokkaido.jp", + "rebun.hokkaido.jp", + "rikubetsu.hokkaido.jp", + "rishiri.hokkaido.jp", + "rishirifuji.hokkaido.jp", + "saroma.hokkaido.jp", + "sarufutsu.hokkaido.jp", + "shakotan.hokkaido.jp", + "shari.hokkaido.jp", + "shibecha.hokkaido.jp", + "shibetsu.hokkaido.jp", + "shikabe.hokkaido.jp", + "shikaoi.hokkaido.jp", + "shimamaki.hokkaido.jp", + "shimizu.hokkaido.jp", + "shimokawa.hokkaido.jp", + "shinshinotsu.hokkaido.jp", + "shintoku.hokkaido.jp", + "shiranuka.hokkaido.jp", + "shiraoi.hokkaido.jp", + "shiriuchi.hokkaido.jp", + "sobetsu.hokkaido.jp", + "sunagawa.hokkaido.jp", + "taiki.hokkaido.jp", + "takasu.hokkaido.jp", + "takikawa.hokkaido.jp", + "takinoue.hokkaido.jp", + "teshikaga.hokkaido.jp", + "tobetsu.hokkaido.jp", + "tohma.hokkaido.jp", + "tomakomai.hokkaido.jp", + "tomari.hokkaido.jp", + "toya.hokkaido.jp", + "toyako.hokkaido.jp", + "toyotomi.hokkaido.jp", + "toyoura.hokkaido.jp", + "tsubetsu.hokkaido.jp", + "tsukigata.hokkaido.jp", + "urakawa.hokkaido.jp", + "urausu.hokkaido.jp", + "uryu.hokkaido.jp", + "utashinai.hokkaido.jp", + "wakkanai.hokkaido.jp", + "wassamu.hokkaido.jp", + "yakumo.hokkaido.jp", + "yoichi.hokkaido.jp", + "aioi.hyogo.jp", + "akashi.hyogo.jp", + "ako.hyogo.jp", + "amagasaki.hyogo.jp", + "aogaki.hyogo.jp", + "asago.hyogo.jp", + "ashiya.hyogo.jp", + "awaji.hyogo.jp", + "fukusaki.hyogo.jp", + "goshiki.hyogo.jp", + "harima.hyogo.jp", + "himeji.hyogo.jp", + "ichikawa.hyogo.jp", + "inagawa.hyogo.jp", + "itami.hyogo.jp", + "kakogawa.hyogo.jp", + "kamigori.hyogo.jp", + "kamikawa.hyogo.jp", + "kasai.hyogo.jp", + "kasuga.hyogo.jp", + "kawanishi.hyogo.jp", + "miki.hyogo.jp", + 
"minamiawaji.hyogo.jp", + "nishinomiya.hyogo.jp", + "nishiwaki.hyogo.jp", + "ono.hyogo.jp", + "sanda.hyogo.jp", + "sannan.hyogo.jp", + "sasayama.hyogo.jp", + "sayo.hyogo.jp", + "shingu.hyogo.jp", + "shinonsen.hyogo.jp", + "shiso.hyogo.jp", + "sumoto.hyogo.jp", + "taishi.hyogo.jp", + "taka.hyogo.jp", + "takarazuka.hyogo.jp", + "takasago.hyogo.jp", + "takino.hyogo.jp", + "tamba.hyogo.jp", + "tatsuno.hyogo.jp", + "toyooka.hyogo.jp", + "yabu.hyogo.jp", + "yashiro.hyogo.jp", + "yoka.hyogo.jp", + "yokawa.hyogo.jp", + "ami.ibaraki.jp", + "asahi.ibaraki.jp", + "bando.ibaraki.jp", + "chikusei.ibaraki.jp", + "daigo.ibaraki.jp", + "fujishiro.ibaraki.jp", + "hitachi.ibaraki.jp", + "hitachinaka.ibaraki.jp", + "hitachiomiya.ibaraki.jp", + "hitachiota.ibaraki.jp", + "ibaraki.ibaraki.jp", + "ina.ibaraki.jp", + "inashiki.ibaraki.jp", + "itako.ibaraki.jp", + "iwama.ibaraki.jp", + "joso.ibaraki.jp", + "kamisu.ibaraki.jp", + "kasama.ibaraki.jp", + "kashima.ibaraki.jp", + "kasumigaura.ibaraki.jp", + "koga.ibaraki.jp", + "miho.ibaraki.jp", + "mito.ibaraki.jp", + "moriya.ibaraki.jp", + "naka.ibaraki.jp", + "namegata.ibaraki.jp", + "oarai.ibaraki.jp", + "ogawa.ibaraki.jp", + "omitama.ibaraki.jp", + "ryugasaki.ibaraki.jp", + "sakai.ibaraki.jp", + "sakuragawa.ibaraki.jp", + "shimodate.ibaraki.jp", + "shimotsuma.ibaraki.jp", + "shirosato.ibaraki.jp", + "sowa.ibaraki.jp", + "suifu.ibaraki.jp", + "takahagi.ibaraki.jp", + "tamatsukuri.ibaraki.jp", + "tokai.ibaraki.jp", + "tomobe.ibaraki.jp", + "tone.ibaraki.jp", + "toride.ibaraki.jp", + "tsuchiura.ibaraki.jp", + "tsukuba.ibaraki.jp", + "uchihara.ibaraki.jp", + "ushiku.ibaraki.jp", + "yachiyo.ibaraki.jp", + "yamagata.ibaraki.jp", + "yawara.ibaraki.jp", + "yuki.ibaraki.jp", + "anamizu.ishikawa.jp", + "hakui.ishikawa.jp", + "hakusan.ishikawa.jp", + "kaga.ishikawa.jp", + "kahoku.ishikawa.jp", + "kanazawa.ishikawa.jp", + "kawakita.ishikawa.jp", + "komatsu.ishikawa.jp", + "nakanoto.ishikawa.jp", + "nanao.ishikawa.jp", + "nomi.ishikawa.jp", + "nonoichi.ishikawa.jp", + "noto.ishikawa.jp", + "shika.ishikawa.jp", + "suzu.ishikawa.jp", + "tsubata.ishikawa.jp", + "tsurugi.ishikawa.jp", + "uchinada.ishikawa.jp", + "wajima.ishikawa.jp", + "fudai.iwate.jp", + "fujisawa.iwate.jp", + "hanamaki.iwate.jp", + "hiraizumi.iwate.jp", + "hirono.iwate.jp", + "ichinohe.iwate.jp", + "ichinoseki.iwate.jp", + "iwaizumi.iwate.jp", + "iwate.iwate.jp", + "joboji.iwate.jp", + "kamaishi.iwate.jp", + "kanegasaki.iwate.jp", + "karumai.iwate.jp", + "kawai.iwate.jp", + "kitakami.iwate.jp", + "kuji.iwate.jp", + "kunohe.iwate.jp", + "kuzumaki.iwate.jp", + "miyako.iwate.jp", + "mizusawa.iwate.jp", + "morioka.iwate.jp", + "ninohe.iwate.jp", + "noda.iwate.jp", + "ofunato.iwate.jp", + "oshu.iwate.jp", + "otsuchi.iwate.jp", + "rikuzentakata.iwate.jp", + "shiwa.iwate.jp", + "shizukuishi.iwate.jp", + "sumita.iwate.jp", + "tanohata.iwate.jp", + "tono.iwate.jp", + "yahaba.iwate.jp", + "yamada.iwate.jp", + "ayagawa.kagawa.jp", + "higashikagawa.kagawa.jp", + "kanonji.kagawa.jp", + "kotohira.kagawa.jp", + "manno.kagawa.jp", + "marugame.kagawa.jp", + "mitoyo.kagawa.jp", + "naoshima.kagawa.jp", + "sanuki.kagawa.jp", + "tadotsu.kagawa.jp", + "takamatsu.kagawa.jp", + "tonosho.kagawa.jp", + "uchinomi.kagawa.jp", + "utazu.kagawa.jp", + "zentsuji.kagawa.jp", + "akune.kagoshima.jp", + "amami.kagoshima.jp", + "hioki.kagoshima.jp", + "isa.kagoshima.jp", + "isen.kagoshima.jp", + "izumi.kagoshima.jp", + "kagoshima.kagoshima.jp", + "kanoya.kagoshima.jp", + "kawanabe.kagoshima.jp", + "kinko.kagoshima.jp", + "kouyama.kagoshima.jp", + 
"makurazaki.kagoshima.jp", + "matsumoto.kagoshima.jp", + "minamitane.kagoshima.jp", + "nakatane.kagoshima.jp", + "nishinoomote.kagoshima.jp", + "satsumasendai.kagoshima.jp", + "soo.kagoshima.jp", + "tarumizu.kagoshima.jp", + "yusui.kagoshima.jp", + "aikawa.kanagawa.jp", + "atsugi.kanagawa.jp", + "ayase.kanagawa.jp", + "chigasaki.kanagawa.jp", + "ebina.kanagawa.jp", + "fujisawa.kanagawa.jp", + "hadano.kanagawa.jp", + "hakone.kanagawa.jp", + "hiratsuka.kanagawa.jp", + "isehara.kanagawa.jp", + "kaisei.kanagawa.jp", + "kamakura.kanagawa.jp", + "kiyokawa.kanagawa.jp", + "matsuda.kanagawa.jp", + "minamiashigara.kanagawa.jp", + "miura.kanagawa.jp", + "nakai.kanagawa.jp", + "ninomiya.kanagawa.jp", + "odawara.kanagawa.jp", + "oi.kanagawa.jp", + "oiso.kanagawa.jp", + "sagamihara.kanagawa.jp", + "samukawa.kanagawa.jp", + "tsukui.kanagawa.jp", + "yamakita.kanagawa.jp", + "yamato.kanagawa.jp", + "yokosuka.kanagawa.jp", + "yugawara.kanagawa.jp", + "zama.kanagawa.jp", + "zushi.kanagawa.jp", + "aki.kochi.jp", + "geisei.kochi.jp", + "hidaka.kochi.jp", + "higashitsuno.kochi.jp", + "ino.kochi.jp", + "kagami.kochi.jp", + "kami.kochi.jp", + "kitagawa.kochi.jp", + "kochi.kochi.jp", + "mihara.kochi.jp", + "motoyama.kochi.jp", + "muroto.kochi.jp", + "nahari.kochi.jp", + "nakamura.kochi.jp", + "nankoku.kochi.jp", + "nishitosa.kochi.jp", + "niyodogawa.kochi.jp", + "ochi.kochi.jp", + "okawa.kochi.jp", + "otoyo.kochi.jp", + "otsuki.kochi.jp", + "sakawa.kochi.jp", + "sukumo.kochi.jp", + "susaki.kochi.jp", + "tosa.kochi.jp", + "tosashimizu.kochi.jp", + "toyo.kochi.jp", + "tsuno.kochi.jp", + "umaji.kochi.jp", + "yasuda.kochi.jp", + "yusuhara.kochi.jp", + "amakusa.kumamoto.jp", + "arao.kumamoto.jp", + "aso.kumamoto.jp", + "choyo.kumamoto.jp", + "gyokuto.kumamoto.jp", + "kamiamakusa.kumamoto.jp", + "kikuchi.kumamoto.jp", + "kumamoto.kumamoto.jp", + "mashiki.kumamoto.jp", + "mifune.kumamoto.jp", + "minamata.kumamoto.jp", + "minamioguni.kumamoto.jp", + "nagasu.kumamoto.jp", + "nishihara.kumamoto.jp", + "oguni.kumamoto.jp", + "ozu.kumamoto.jp", + "sumoto.kumamoto.jp", + "takamori.kumamoto.jp", + "uki.kumamoto.jp", + "uto.kumamoto.jp", + "yamaga.kumamoto.jp", + "yamato.kumamoto.jp", + "yatsushiro.kumamoto.jp", + "ayabe.kyoto.jp", + "fukuchiyama.kyoto.jp", + "higashiyama.kyoto.jp", + "ide.kyoto.jp", + "ine.kyoto.jp", + "joyo.kyoto.jp", + "kameoka.kyoto.jp", + "kamo.kyoto.jp", + "kita.kyoto.jp", + "kizu.kyoto.jp", + "kumiyama.kyoto.jp", + "kyotamba.kyoto.jp", + "kyotanabe.kyoto.jp", + "kyotango.kyoto.jp", + "maizuru.kyoto.jp", + "minami.kyoto.jp", + "minamiyamashiro.kyoto.jp", + "miyazu.kyoto.jp", + "muko.kyoto.jp", + "nagaokakyo.kyoto.jp", + "nakagyo.kyoto.jp", + "nantan.kyoto.jp", + "oyamazaki.kyoto.jp", + "sakyo.kyoto.jp", + "seika.kyoto.jp", + "tanabe.kyoto.jp", + "uji.kyoto.jp", + "ujitawara.kyoto.jp", + "wazuka.kyoto.jp", + "yamashina.kyoto.jp", + "yawata.kyoto.jp", + "asahi.mie.jp", + "inabe.mie.jp", + "ise.mie.jp", + "kameyama.mie.jp", + "kawagoe.mie.jp", + "kiho.mie.jp", + "kisosaki.mie.jp", + "kiwa.mie.jp", + "komono.mie.jp", + "kumano.mie.jp", + "kuwana.mie.jp", + "matsusaka.mie.jp", + "meiwa.mie.jp", + "mihama.mie.jp", + "minamiise.mie.jp", + "misugi.mie.jp", + "miyama.mie.jp", + "nabari.mie.jp", + "shima.mie.jp", + "suzuka.mie.jp", + "tado.mie.jp", + "taiki.mie.jp", + "taki.mie.jp", + "tamaki.mie.jp", + "toba.mie.jp", + "tsu.mie.jp", + "udono.mie.jp", + "ureshino.mie.jp", + "watarai.mie.jp", + "yokkaichi.mie.jp", + "furukawa.miyagi.jp", + "higashimatsushima.miyagi.jp", + "ishinomaki.miyagi.jp", + 
"iwanuma.miyagi.jp", + "kakuda.miyagi.jp", + "kami.miyagi.jp", + "kawasaki.miyagi.jp", + "marumori.miyagi.jp", + "matsushima.miyagi.jp", + "minamisanriku.miyagi.jp", + "misato.miyagi.jp", + "murata.miyagi.jp", + "natori.miyagi.jp", + "ogawara.miyagi.jp", + "ohira.miyagi.jp", + "onagawa.miyagi.jp", + "osaki.miyagi.jp", + "rifu.miyagi.jp", + "semine.miyagi.jp", + "shibata.miyagi.jp", + "shichikashuku.miyagi.jp", + "shikama.miyagi.jp", + "shiogama.miyagi.jp", + "shiroishi.miyagi.jp", + "tagajo.miyagi.jp", + "taiwa.miyagi.jp", + "tome.miyagi.jp", + "tomiya.miyagi.jp", + "wakuya.miyagi.jp", + "watari.miyagi.jp", + "yamamoto.miyagi.jp", + "zao.miyagi.jp", + "aya.miyazaki.jp", + "ebino.miyazaki.jp", + "gokase.miyazaki.jp", + "hyuga.miyazaki.jp", + "kadogawa.miyazaki.jp", + "kawaminami.miyazaki.jp", + "kijo.miyazaki.jp", + "kitagawa.miyazaki.jp", + "kitakata.miyazaki.jp", + "kitaura.miyazaki.jp", + "kobayashi.miyazaki.jp", + "kunitomi.miyazaki.jp", + "kushima.miyazaki.jp", + "mimata.miyazaki.jp", + "miyakonojo.miyazaki.jp", + "miyazaki.miyazaki.jp", + "morotsuka.miyazaki.jp", + "nichinan.miyazaki.jp", + "nishimera.miyazaki.jp", + "nobeoka.miyazaki.jp", + "saito.miyazaki.jp", + "shiiba.miyazaki.jp", + "shintomi.miyazaki.jp", + "takaharu.miyazaki.jp", + "takanabe.miyazaki.jp", + "takazaki.miyazaki.jp", + "tsuno.miyazaki.jp", + "achi.nagano.jp", + "agematsu.nagano.jp", + "anan.nagano.jp", + "aoki.nagano.jp", + "asahi.nagano.jp", + "azumino.nagano.jp", + "chikuhoku.nagano.jp", + "chikuma.nagano.jp", + "chino.nagano.jp", + "fujimi.nagano.jp", + "hakuba.nagano.jp", + "hara.nagano.jp", + "hiraya.nagano.jp", + "iida.nagano.jp", + "iijima.nagano.jp", + "iiyama.nagano.jp", + "iizuna.nagano.jp", + "ikeda.nagano.jp", + "ikusaka.nagano.jp", + "ina.nagano.jp", + "karuizawa.nagano.jp", + "kawakami.nagano.jp", + "kiso.nagano.jp", + "kisofukushima.nagano.jp", + "kitaaiki.nagano.jp", + "komagane.nagano.jp", + "komoro.nagano.jp", + "matsukawa.nagano.jp", + "matsumoto.nagano.jp", + "miasa.nagano.jp", + "minamiaiki.nagano.jp", + "minamimaki.nagano.jp", + "minamiminowa.nagano.jp", + "minowa.nagano.jp", + "miyada.nagano.jp", + "miyota.nagano.jp", + "mochizuki.nagano.jp", + "nagano.nagano.jp", + "nagawa.nagano.jp", + "nagiso.nagano.jp", + "nakagawa.nagano.jp", + "nakano.nagano.jp", + "nozawaonsen.nagano.jp", + "obuse.nagano.jp", + "ogawa.nagano.jp", + "okaya.nagano.jp", + "omachi.nagano.jp", + "omi.nagano.jp", + "ookuwa.nagano.jp", + "ooshika.nagano.jp", + "otaki.nagano.jp", + "otari.nagano.jp", + "sakae.nagano.jp", + "sakaki.nagano.jp", + "saku.nagano.jp", + "sakuho.nagano.jp", + "shimosuwa.nagano.jp", + "shinanomachi.nagano.jp", + "shiojiri.nagano.jp", + "suwa.nagano.jp", + "suzaka.nagano.jp", + "takagi.nagano.jp", + "takamori.nagano.jp", + "takayama.nagano.jp", + "tateshina.nagano.jp", + "tatsuno.nagano.jp", + "togakushi.nagano.jp", + "togura.nagano.jp", + "tomi.nagano.jp", + "ueda.nagano.jp", + "wada.nagano.jp", + "yamagata.nagano.jp", + "yamanouchi.nagano.jp", + "yasaka.nagano.jp", + "yasuoka.nagano.jp", + "chijiwa.nagasaki.jp", + "futsu.nagasaki.jp", + "goto.nagasaki.jp", + "hasami.nagasaki.jp", + "hirado.nagasaki.jp", + "iki.nagasaki.jp", + "isahaya.nagasaki.jp", + "kawatana.nagasaki.jp", + "kuchinotsu.nagasaki.jp", + "matsuura.nagasaki.jp", + "nagasaki.nagasaki.jp", + "obama.nagasaki.jp", + "omura.nagasaki.jp", + "oseto.nagasaki.jp", + "saikai.nagasaki.jp", + "sasebo.nagasaki.jp", + "seihi.nagasaki.jp", + "shimabara.nagasaki.jp", + "shinkamigoto.nagasaki.jp", + "togitsu.nagasaki.jp", + "tsushima.nagasaki.jp", + 
"unzen.nagasaki.jp", + "ando.nara.jp", + "gose.nara.jp", + "heguri.nara.jp", + "higashiyoshino.nara.jp", + "ikaruga.nara.jp", + "ikoma.nara.jp", + "kamikitayama.nara.jp", + "kanmaki.nara.jp", + "kashiba.nara.jp", + "kashihara.nara.jp", + "katsuragi.nara.jp", + "kawai.nara.jp", + "kawakami.nara.jp", + "kawanishi.nara.jp", + "koryo.nara.jp", + "kurotaki.nara.jp", + "mitsue.nara.jp", + "miyake.nara.jp", + "nara.nara.jp", + "nosegawa.nara.jp", + "oji.nara.jp", + "ouda.nara.jp", + "oyodo.nara.jp", + "sakurai.nara.jp", + "sango.nara.jp", + "shimoichi.nara.jp", + "shimokitayama.nara.jp", + "shinjo.nara.jp", + "soni.nara.jp", + "takatori.nara.jp", + "tawaramoto.nara.jp", + "tenkawa.nara.jp", + "tenri.nara.jp", + "uda.nara.jp", + "yamatokoriyama.nara.jp", + "yamatotakada.nara.jp", + "yamazoe.nara.jp", + "yoshino.nara.jp", + "aga.niigata.jp", + "agano.niigata.jp", + "gosen.niigata.jp", + "itoigawa.niigata.jp", + "izumozaki.niigata.jp", + "joetsu.niigata.jp", + "kamo.niigata.jp", + "kariwa.niigata.jp", + "kashiwazaki.niigata.jp", + "minamiuonuma.niigata.jp", + "mitsuke.niigata.jp", + "muika.niigata.jp", + "murakami.niigata.jp", + "myoko.niigata.jp", + "nagaoka.niigata.jp", + "niigata.niigata.jp", + "ojiya.niigata.jp", + "omi.niigata.jp", + "sado.niigata.jp", + "sanjo.niigata.jp", + "seiro.niigata.jp", + "seirou.niigata.jp", + "sekikawa.niigata.jp", + "shibata.niigata.jp", + "tagami.niigata.jp", + "tainai.niigata.jp", + "tochio.niigata.jp", + "tokamachi.niigata.jp", + "tsubame.niigata.jp", + "tsunan.niigata.jp", + "uonuma.niigata.jp", + "yahiko.niigata.jp", + "yoita.niigata.jp", + "yuzawa.niigata.jp", + "beppu.oita.jp", + "bungoono.oita.jp", + "bungotakada.oita.jp", + "hasama.oita.jp", + "hiji.oita.jp", + "himeshima.oita.jp", + "hita.oita.jp", + "kamitsue.oita.jp", + "kokonoe.oita.jp", + "kuju.oita.jp", + "kunisaki.oita.jp", + "kusu.oita.jp", + "oita.oita.jp", + "saiki.oita.jp", + "taketa.oita.jp", + "tsukumi.oita.jp", + "usa.oita.jp", + "usuki.oita.jp", + "yufu.oita.jp", + "akaiwa.okayama.jp", + "asakuchi.okayama.jp", + "bizen.okayama.jp", + "hayashima.okayama.jp", + "ibara.okayama.jp", + "kagamino.okayama.jp", + "kasaoka.okayama.jp", + "kibichuo.okayama.jp", + "kumenan.okayama.jp", + "kurashiki.okayama.jp", + "maniwa.okayama.jp", + "misaki.okayama.jp", + "nagi.okayama.jp", + "niimi.okayama.jp", + "nishiawakura.okayama.jp", + "okayama.okayama.jp", + "satosho.okayama.jp", + "setouchi.okayama.jp", + "shinjo.okayama.jp", + "shoo.okayama.jp", + "soja.okayama.jp", + "takahashi.okayama.jp", + "tamano.okayama.jp", + "tsuyama.okayama.jp", + "wake.okayama.jp", + "yakage.okayama.jp", + "aguni.okinawa.jp", + "ginowan.okinawa.jp", + "ginoza.okinawa.jp", + "gushikami.okinawa.jp", + "haebaru.okinawa.jp", + "higashi.okinawa.jp", + "hirara.okinawa.jp", + "iheya.okinawa.jp", + "ishigaki.okinawa.jp", + "ishikawa.okinawa.jp", + "itoman.okinawa.jp", + "izena.okinawa.jp", + "kadena.okinawa.jp", + "kin.okinawa.jp", + "kitadaito.okinawa.jp", + "kitanakagusuku.okinawa.jp", + "kumejima.okinawa.jp", + "kunigami.okinawa.jp", + "minamidaito.okinawa.jp", + "motobu.okinawa.jp", + "nago.okinawa.jp", + "naha.okinawa.jp", + "nakagusuku.okinawa.jp", + "nakijin.okinawa.jp", + "nanjo.okinawa.jp", + "nishihara.okinawa.jp", + "ogimi.okinawa.jp", + "okinawa.okinawa.jp", + "onna.okinawa.jp", + "shimoji.okinawa.jp", + "taketomi.okinawa.jp", + "tarama.okinawa.jp", + "tokashiki.okinawa.jp", + "tomigusuku.okinawa.jp", + "tonaki.okinawa.jp", + "urasoe.okinawa.jp", + "uruma.okinawa.jp", + "yaese.okinawa.jp", + "yomitan.okinawa.jp", + 
"yonabaru.okinawa.jp", + "yonaguni.okinawa.jp", + "zamami.okinawa.jp", + "abeno.osaka.jp", + "chihayaakasaka.osaka.jp", + "chuo.osaka.jp", + "daito.osaka.jp", + "fujiidera.osaka.jp", + "habikino.osaka.jp", + "hannan.osaka.jp", + "higashiosaka.osaka.jp", + "higashisumiyoshi.osaka.jp", + "higashiyodogawa.osaka.jp", + "hirakata.osaka.jp", + "ibaraki.osaka.jp", + "ikeda.osaka.jp", + "izumi.osaka.jp", + "izumiotsu.osaka.jp", + "izumisano.osaka.jp", + "kadoma.osaka.jp", + "kaizuka.osaka.jp", + "kanan.osaka.jp", + "kashiwara.osaka.jp", + "katano.osaka.jp", + "kawachinagano.osaka.jp", + "kishiwada.osaka.jp", + "kita.osaka.jp", + "kumatori.osaka.jp", + "matsubara.osaka.jp", + "minato.osaka.jp", + "minoh.osaka.jp", + "misaki.osaka.jp", + "moriguchi.osaka.jp", + "neyagawa.osaka.jp", + "nishi.osaka.jp", + "nose.osaka.jp", + "osakasayama.osaka.jp", + "sakai.osaka.jp", + "sayama.osaka.jp", + "sennan.osaka.jp", + "settsu.osaka.jp", + "shijonawate.osaka.jp", + "shimamoto.osaka.jp", + "suita.osaka.jp", + "tadaoka.osaka.jp", + "taishi.osaka.jp", + "tajiri.osaka.jp", + "takaishi.osaka.jp", + "takatsuki.osaka.jp", + "tondabayashi.osaka.jp", + "toyonaka.osaka.jp", + "toyono.osaka.jp", + "yao.osaka.jp", + "ariake.saga.jp", + "arita.saga.jp", + "fukudomi.saga.jp", + "genkai.saga.jp", + "hamatama.saga.jp", + "hizen.saga.jp", + "imari.saga.jp", + "kamimine.saga.jp", + "kanzaki.saga.jp", + "karatsu.saga.jp", + "kashima.saga.jp", + "kitagata.saga.jp", + "kitahata.saga.jp", + "kiyama.saga.jp", + "kouhoku.saga.jp", + "kyuragi.saga.jp", + "nishiarita.saga.jp", + "ogi.saga.jp", + "omachi.saga.jp", + "ouchi.saga.jp", + "saga.saga.jp", + "shiroishi.saga.jp", + "taku.saga.jp", + "tara.saga.jp", + "tosu.saga.jp", + "yoshinogari.saga.jp", + "arakawa.saitama.jp", + "asaka.saitama.jp", + "chichibu.saitama.jp", + "fujimi.saitama.jp", + "fujimino.saitama.jp", + "fukaya.saitama.jp", + "hanno.saitama.jp", + "hanyu.saitama.jp", + "hasuda.saitama.jp", + "hatogaya.saitama.jp", + "hatoyama.saitama.jp", + "hidaka.saitama.jp", + "higashichichibu.saitama.jp", + "higashimatsuyama.saitama.jp", + "honjo.saitama.jp", + "ina.saitama.jp", + "iruma.saitama.jp", + "iwatsuki.saitama.jp", + "kamiizumi.saitama.jp", + "kamikawa.saitama.jp", + "kamisato.saitama.jp", + "kasukabe.saitama.jp", + "kawagoe.saitama.jp", + "kawaguchi.saitama.jp", + "kawajima.saitama.jp", + "kazo.saitama.jp", + "kitamoto.saitama.jp", + "koshigaya.saitama.jp", + "kounosu.saitama.jp", + "kuki.saitama.jp", + "kumagaya.saitama.jp", + "matsubushi.saitama.jp", + "minano.saitama.jp", + "misato.saitama.jp", + "miyashiro.saitama.jp", + "miyoshi.saitama.jp", + "moroyama.saitama.jp", + "nagatoro.saitama.jp", + "namegawa.saitama.jp", + "niiza.saitama.jp", + "ogano.saitama.jp", + "ogawa.saitama.jp", + "ogose.saitama.jp", + "okegawa.saitama.jp", + "omiya.saitama.jp", + "otaki.saitama.jp", + "ranzan.saitama.jp", + "ryokami.saitama.jp", + "saitama.saitama.jp", + "sakado.saitama.jp", + "satte.saitama.jp", + "sayama.saitama.jp", + "shiki.saitama.jp", + "shiraoka.saitama.jp", + "soka.saitama.jp", + "sugito.saitama.jp", + "toda.saitama.jp", + "tokigawa.saitama.jp", + "tokorozawa.saitama.jp", + "tsurugashima.saitama.jp", + "urawa.saitama.jp", + "warabi.saitama.jp", + "yashio.saitama.jp", + "yokoze.saitama.jp", + "yono.saitama.jp", + "yorii.saitama.jp", + "yoshida.saitama.jp", + "yoshikawa.saitama.jp", + "yoshimi.saitama.jp", + "aisho.shiga.jp", + "gamo.shiga.jp", + "higashiomi.shiga.jp", + "hikone.shiga.jp", + "koka.shiga.jp", + "konan.shiga.jp", + "kosei.shiga.jp", + "koto.shiga.jp", + 
"kusatsu.shiga.jp", + "maibara.shiga.jp", + "moriyama.shiga.jp", + "nagahama.shiga.jp", + "nishiazai.shiga.jp", + "notogawa.shiga.jp", + "omihachiman.shiga.jp", + "otsu.shiga.jp", + "ritto.shiga.jp", + "ryuoh.shiga.jp", + "takashima.shiga.jp", + "takatsuki.shiga.jp", + "torahime.shiga.jp", + "toyosato.shiga.jp", + "yasu.shiga.jp", + "akagi.shimane.jp", + "ama.shimane.jp", + "gotsu.shimane.jp", + "hamada.shimane.jp", + "higashiizumo.shimane.jp", + "hikawa.shimane.jp", + "hikimi.shimane.jp", + "izumo.shimane.jp", + "kakinoki.shimane.jp", + "masuda.shimane.jp", + "matsue.shimane.jp", + "misato.shimane.jp", + "nishinoshima.shimane.jp", + "ohda.shimane.jp", + "okinoshima.shimane.jp", + "okuizumo.shimane.jp", + "shimane.shimane.jp", + "tamayu.shimane.jp", + "tsuwano.shimane.jp", + "unnan.shimane.jp", + "yakumo.shimane.jp", + "yasugi.shimane.jp", + "yatsuka.shimane.jp", + "arai.shizuoka.jp", + "atami.shizuoka.jp", + "fuji.shizuoka.jp", + "fujieda.shizuoka.jp", + "fujikawa.shizuoka.jp", + "fujinomiya.shizuoka.jp", + "fukuroi.shizuoka.jp", + "gotemba.shizuoka.jp", + "haibara.shizuoka.jp", + "hamamatsu.shizuoka.jp", + "higashiizu.shizuoka.jp", + "ito.shizuoka.jp", + "iwata.shizuoka.jp", + "izu.shizuoka.jp", + "izunokuni.shizuoka.jp", + "kakegawa.shizuoka.jp", + "kannami.shizuoka.jp", + "kawanehon.shizuoka.jp", + "kawazu.shizuoka.jp", + "kikugawa.shizuoka.jp", + "kosai.shizuoka.jp", + "makinohara.shizuoka.jp", + "matsuzaki.shizuoka.jp", + "minamiizu.shizuoka.jp", + "mishima.shizuoka.jp", + "morimachi.shizuoka.jp", + "nishiizu.shizuoka.jp", + "numazu.shizuoka.jp", + "omaezaki.shizuoka.jp", + "shimada.shizuoka.jp", + "shimizu.shizuoka.jp", + "shimoda.shizuoka.jp", + "shizuoka.shizuoka.jp", + "susono.shizuoka.jp", + "yaizu.shizuoka.jp", + "yoshida.shizuoka.jp", + "ashikaga.tochigi.jp", + "bato.tochigi.jp", + "haga.tochigi.jp", + "ichikai.tochigi.jp", + "iwafune.tochigi.jp", + "kaminokawa.tochigi.jp", + "kanuma.tochigi.jp", + "karasuyama.tochigi.jp", + "kuroiso.tochigi.jp", + "mashiko.tochigi.jp", + "mibu.tochigi.jp", + "moka.tochigi.jp", + "motegi.tochigi.jp", + "nasu.tochigi.jp", + "nasushiobara.tochigi.jp", + "nikko.tochigi.jp", + "nishikata.tochigi.jp", + "nogi.tochigi.jp", + "ohira.tochigi.jp", + "ohtawara.tochigi.jp", + "oyama.tochigi.jp", + "sakura.tochigi.jp", + "sano.tochigi.jp", + "shimotsuke.tochigi.jp", + "shioya.tochigi.jp", + "takanezawa.tochigi.jp", + "tochigi.tochigi.jp", + "tsuga.tochigi.jp", + "ujiie.tochigi.jp", + "utsunomiya.tochigi.jp", + "yaita.tochigi.jp", + "aizumi.tokushima.jp", + "anan.tokushima.jp", + "ichiba.tokushima.jp", + "itano.tokushima.jp", + "kainan.tokushima.jp", + "komatsushima.tokushima.jp", + "matsushige.tokushima.jp", + "mima.tokushima.jp", + "minami.tokushima.jp", + "miyoshi.tokushima.jp", + "mugi.tokushima.jp", + "nakagawa.tokushima.jp", + "naruto.tokushima.jp", + "sanagochi.tokushima.jp", + "shishikui.tokushima.jp", + "tokushima.tokushima.jp", + "wajiki.tokushima.jp", + "adachi.tokyo.jp", + "akiruno.tokyo.jp", + "akishima.tokyo.jp", + "aogashima.tokyo.jp", + "arakawa.tokyo.jp", + "bunkyo.tokyo.jp", + "chiyoda.tokyo.jp", + "chofu.tokyo.jp", + "chuo.tokyo.jp", + "edogawa.tokyo.jp", + "fuchu.tokyo.jp", + "fussa.tokyo.jp", + "hachijo.tokyo.jp", + "hachioji.tokyo.jp", + "hamura.tokyo.jp", + "higashikurume.tokyo.jp", + "higashimurayama.tokyo.jp", + "higashiyamato.tokyo.jp", + "hino.tokyo.jp", + "hinode.tokyo.jp", + "hinohara.tokyo.jp", + "inagi.tokyo.jp", + "itabashi.tokyo.jp", + "katsushika.tokyo.jp", + "kita.tokyo.jp", + "kiyose.tokyo.jp", + "kodaira.tokyo.jp", + 
"koganei.tokyo.jp", + "kokubunji.tokyo.jp", + "komae.tokyo.jp", + "koto.tokyo.jp", + "kouzushima.tokyo.jp", + "kunitachi.tokyo.jp", + "machida.tokyo.jp", + "meguro.tokyo.jp", + "minato.tokyo.jp", + "mitaka.tokyo.jp", + "mizuho.tokyo.jp", + "musashimurayama.tokyo.jp", + "musashino.tokyo.jp", + "nakano.tokyo.jp", + "nerima.tokyo.jp", + "ogasawara.tokyo.jp", + "okutama.tokyo.jp", + "ome.tokyo.jp", + "oshima.tokyo.jp", + "ota.tokyo.jp", + "setagaya.tokyo.jp", + "shibuya.tokyo.jp", + "shinagawa.tokyo.jp", + "shinjuku.tokyo.jp", + "suginami.tokyo.jp", + "sumida.tokyo.jp", + "tachikawa.tokyo.jp", + "taito.tokyo.jp", + "tama.tokyo.jp", + "toshima.tokyo.jp", + "chizu.tottori.jp", + "hino.tottori.jp", + "kawahara.tottori.jp", + "koge.tottori.jp", + "kotoura.tottori.jp", + "misasa.tottori.jp", + "nanbu.tottori.jp", + "nichinan.tottori.jp", + "sakaiminato.tottori.jp", + "tottori.tottori.jp", + "wakasa.tottori.jp", + "yazu.tottori.jp", + "yonago.tottori.jp", + "asahi.toyama.jp", + "fuchu.toyama.jp", + "fukumitsu.toyama.jp", + "funahashi.toyama.jp", + "himi.toyama.jp", + "imizu.toyama.jp", + "inami.toyama.jp", + "johana.toyama.jp", + "kamiichi.toyama.jp", + "kurobe.toyama.jp", + "nakaniikawa.toyama.jp", + "namerikawa.toyama.jp", + "nanto.toyama.jp", + "nyuzen.toyama.jp", + "oyabe.toyama.jp", + "taira.toyama.jp", + "takaoka.toyama.jp", + "tateyama.toyama.jp", + "toga.toyama.jp", + "tonami.toyama.jp", + "toyama.toyama.jp", + "unazuki.toyama.jp", + "uozu.toyama.jp", + "yamada.toyama.jp", + "arida.wakayama.jp", + "aridagawa.wakayama.jp", + "gobo.wakayama.jp", + "hashimoto.wakayama.jp", + "hidaka.wakayama.jp", + "hirogawa.wakayama.jp", + "inami.wakayama.jp", + "iwade.wakayama.jp", + "kainan.wakayama.jp", + "kamitonda.wakayama.jp", + "katsuragi.wakayama.jp", + "kimino.wakayama.jp", + "kinokawa.wakayama.jp", + "kitayama.wakayama.jp", + "koya.wakayama.jp", + "koza.wakayama.jp", + "kozagawa.wakayama.jp", + "kudoyama.wakayama.jp", + "kushimoto.wakayama.jp", + "mihama.wakayama.jp", + "misato.wakayama.jp", + "nachikatsuura.wakayama.jp", + "shingu.wakayama.jp", + "shirahama.wakayama.jp", + "taiji.wakayama.jp", + "tanabe.wakayama.jp", + "wakayama.wakayama.jp", + "yuasa.wakayama.jp", + "yura.wakayama.jp", + "asahi.yamagata.jp", + "funagata.yamagata.jp", + "higashine.yamagata.jp", + "iide.yamagata.jp", + "kahoku.yamagata.jp", + "kaminoyama.yamagata.jp", + "kaneyama.yamagata.jp", + "kawanishi.yamagata.jp", + "mamurogawa.yamagata.jp", + "mikawa.yamagata.jp", + "murayama.yamagata.jp", + "nagai.yamagata.jp", + "nakayama.yamagata.jp", + "nanyo.yamagata.jp", + "nishikawa.yamagata.jp", + "obanazawa.yamagata.jp", + "oe.yamagata.jp", + "oguni.yamagata.jp", + "ohkura.yamagata.jp", + "oishida.yamagata.jp", + "sagae.yamagata.jp", + "sakata.yamagata.jp", + "sakegawa.yamagata.jp", + "shinjo.yamagata.jp", + "shirataka.yamagata.jp", + "shonai.yamagata.jp", + "takahata.yamagata.jp", + "tendo.yamagata.jp", + "tozawa.yamagata.jp", + "tsuruoka.yamagata.jp", + "yamagata.yamagata.jp", + "yamanobe.yamagata.jp", + "yonezawa.yamagata.jp", + "yuza.yamagata.jp", + "abu.yamaguchi.jp", + "hagi.yamaguchi.jp", + "hikari.yamaguchi.jp", + "hofu.yamaguchi.jp", + "iwakuni.yamaguchi.jp", + "kudamatsu.yamaguchi.jp", + "mitou.yamaguchi.jp", + "nagato.yamaguchi.jp", + "oshima.yamaguchi.jp", + "shimonoseki.yamaguchi.jp", + "shunan.yamaguchi.jp", + "tabuse.yamaguchi.jp", + "tokuyama.yamaguchi.jp", + "toyota.yamaguchi.jp", + "ube.yamaguchi.jp", + "yuu.yamaguchi.jp", + "chuo.yamanashi.jp", + "doshi.yamanashi.jp", + "fuefuki.yamanashi.jp", + 
"fujikawa.yamanashi.jp", + "fujikawaguchiko.yamanashi.jp", + "fujiyoshida.yamanashi.jp", + "hayakawa.yamanashi.jp", + "hokuto.yamanashi.jp", + "ichikawamisato.yamanashi.jp", + "kai.yamanashi.jp", + "kofu.yamanashi.jp", + "koshu.yamanashi.jp", + "kosuge.yamanashi.jp", + "minami-alps.yamanashi.jp", + "minobu.yamanashi.jp", + "nakamichi.yamanashi.jp", + "nanbu.yamanashi.jp", + "narusawa.yamanashi.jp", + "nirasaki.yamanashi.jp", + "nishikatsura.yamanashi.jp", + "oshino.yamanashi.jp", + "otsuki.yamanashi.jp", + "showa.yamanashi.jp", + "tabayama.yamanashi.jp", + "tsuru.yamanashi.jp", + "uenohara.yamanashi.jp", + "yamanakako.yamanashi.jp", + "yamanashi.yamanashi.jp", + "ke", + "ac.ke", + "co.ke", + "go.ke", + "info.ke", + "me.ke", + "mobi.ke", + "ne.ke", + "or.ke", + "sc.ke", + "kg", + "org.kg", + "net.kg", + "com.kg", + "edu.kg", + "gov.kg", + "mil.kg", + "*.kh", + "ki", + "edu.ki", + "biz.ki", + "net.ki", + "org.ki", + "gov.ki", + "info.ki", + "com.ki", + "km", + "org.km", + "nom.km", + "gov.km", + "prd.km", + "tm.km", + "edu.km", + "mil.km", + "ass.km", + "com.km", + "coop.km", + "asso.km", + "presse.km", + "medecin.km", + "notaires.km", + "pharmaciens.km", + "veterinaire.km", + "gouv.km", + "kn", + "net.kn", + "org.kn", + "edu.kn", + "gov.kn", + "kp", + "com.kp", + "edu.kp", + "gov.kp", + "org.kp", + "rep.kp", + "tra.kp", + "kr", + "ac.kr", + "co.kr", + "es.kr", + "go.kr", + "hs.kr", + "kg.kr", + "mil.kr", + "ms.kr", + "ne.kr", + "or.kr", + "pe.kr", + "re.kr", + "sc.kr", + "busan.kr", + "chungbuk.kr", + "chungnam.kr", + "daegu.kr", + "daejeon.kr", + "gangwon.kr", + "gwangju.kr", + "gyeongbuk.kr", + "gyeonggi.kr", + "gyeongnam.kr", + "incheon.kr", + "jeju.kr", + "jeonbuk.kr", + "jeonnam.kr", + "seoul.kr", + "ulsan.kr", + "*.kw", + "ky", + "edu.ky", + "gov.ky", + "com.ky", + "org.ky", + "net.ky", + "kz", + "org.kz", + "edu.kz", + "net.kz", + "gov.kz", + "mil.kz", + "com.kz", + "la", + "int.la", + "net.la", + "info.la", + "edu.la", + "gov.la", + "per.la", + "com.la", + "org.la", + "lb", + "com.lb", + "edu.lb", + "gov.lb", + "net.lb", + "org.lb", + "lc", + "com.lc", + "net.lc", + "co.lc", + "org.lc", + "edu.lc", + "gov.lc", + "li", + "lk", + "gov.lk", + "sch.lk", + "net.lk", + "int.lk", + "com.lk", + "org.lk", + "edu.lk", + "ngo.lk", + "soc.lk", + "web.lk", + "ltd.lk", + "assn.lk", + "grp.lk", + "hotel.lk", + "ac.lk", + "lr", + "com.lr", + "edu.lr", + "gov.lr", + "org.lr", + "net.lr", + "ls", + "co.ls", + "org.ls", + "lt", + "gov.lt", + "lu", + "lv", + "com.lv", + "edu.lv", + "gov.lv", + "org.lv", + "mil.lv", + "id.lv", + "net.lv", + "asn.lv", + "conf.lv", + "ly", + "com.ly", + "net.ly", + "gov.ly", + "plc.ly", + "edu.ly", + "sch.ly", + "med.ly", + "org.ly", + "id.ly", + "ma", + "co.ma", + "net.ma", + "gov.ma", + "org.ma", + "ac.ma", + "press.ma", + "mc", + "tm.mc", + "asso.mc", + "md", + "me", + "co.me", + "net.me", + "org.me", + "edu.me", + "ac.me", + "gov.me", + "its.me", + "priv.me", + "mg", + "org.mg", + "nom.mg", + "gov.mg", + "prd.mg", + "tm.mg", + "edu.mg", + "mil.mg", + "com.mg", + "co.mg", + "mh", + "mil", + "mk", + "com.mk", + "org.mk", + "net.mk", + "edu.mk", + "gov.mk", + "inf.mk", + "name.mk", + "ml", + "com.ml", + "edu.ml", + "gouv.ml", + "gov.ml", + "net.ml", + "org.ml", + "presse.ml", + "*.mm", + "mn", + "gov.mn", + "edu.mn", + "org.mn", + "mo", + "com.mo", + "net.mo", + "org.mo", + "edu.mo", + "gov.mo", + "mobi", + "mp", + "mq", + "mr", + "gov.mr", + "ms", + "com.ms", + "edu.ms", + "gov.ms", + "net.ms", + "org.ms", + "mt", + "com.mt", + "edu.mt", + "net.mt", + "org.mt", + "mu", 
+ "com.mu", + "net.mu", + "org.mu", + "gov.mu", + "ac.mu", + "co.mu", + "or.mu", + "museum", + "academy.museum", + "agriculture.museum", + "air.museum", + "airguard.museum", + "alabama.museum", + "alaska.museum", + "amber.museum", + "ambulance.museum", + "american.museum", + "americana.museum", + "americanantiques.museum", + "americanart.museum", + "amsterdam.museum", + "and.museum", + "annefrank.museum", + "anthro.museum", + "anthropology.museum", + "antiques.museum", + "aquarium.museum", + "arboretum.museum", + "archaeological.museum", + "archaeology.museum", + "architecture.museum", + "art.museum", + "artanddesign.museum", + "artcenter.museum", + "artdeco.museum", + "arteducation.museum", + "artgallery.museum", + "arts.museum", + "artsandcrafts.museum", + "asmatart.museum", + "assassination.museum", + "assisi.museum", + "association.museum", + "astronomy.museum", + "atlanta.museum", + "austin.museum", + "australia.museum", + "automotive.museum", + "aviation.museum", + "axis.museum", + "badajoz.museum", + "baghdad.museum", + "bahn.museum", + "bale.museum", + "baltimore.museum", + "barcelona.museum", + "baseball.museum", + "basel.museum", + "baths.museum", + "bauern.museum", + "beauxarts.museum", + "beeldengeluid.museum", + "bellevue.museum", + "bergbau.museum", + "berkeley.museum", + "berlin.museum", + "bern.museum", + "bible.museum", + "bilbao.museum", + "bill.museum", + "birdart.museum", + "birthplace.museum", + "bonn.museum", + "boston.museum", + "botanical.museum", + "botanicalgarden.museum", + "botanicgarden.museum", + "botany.museum", + "brandywinevalley.museum", + "brasil.museum", + "bristol.museum", + "british.museum", + "britishcolumbia.museum", + "broadcast.museum", + "brunel.museum", + "brussel.museum", + "brussels.museum", + "bruxelles.museum", + "building.museum", + "burghof.museum", + "bus.museum", + "bushey.museum", + "cadaques.museum", + "california.museum", + "cambridge.museum", + "can.museum", + "canada.museum", + "capebreton.museum", + "carrier.museum", + "cartoonart.museum", + "casadelamoneda.museum", + "castle.museum", + "castres.museum", + "celtic.museum", + "center.museum", + "chattanooga.museum", + "cheltenham.museum", + "chesapeakebay.museum", + "chicago.museum", + "children.museum", + "childrens.museum", + "childrensgarden.museum", + "chiropractic.museum", + "chocolate.museum", + "christiansburg.museum", + "cincinnati.museum", + "cinema.museum", + "circus.museum", + "civilisation.museum", + "civilization.museum", + "civilwar.museum", + "clinton.museum", + "clock.museum", + "coal.museum", + "coastaldefence.museum", + "cody.museum", + "coldwar.museum", + "collection.museum", + "colonialwilliamsburg.museum", + "coloradoplateau.museum", + "columbia.museum", + "columbus.museum", + "communication.museum", + "communications.museum", + "community.museum", + "computer.museum", + "computerhistory.museum", + "xn--comunicaes-v6a2o.museum", + "contemporary.museum", + "contemporaryart.museum", + "convent.museum", + "copenhagen.museum", + "corporation.museum", + "xn--correios-e-telecomunicaes-ghc29a.museum", + "corvette.museum", + "costume.museum", + "countryestate.museum", + "county.museum", + "crafts.museum", + "cranbrook.museum", + "creation.museum", + "cultural.museum", + "culturalcenter.museum", + "culture.museum", + "cyber.museum", + "cymru.museum", + "dali.museum", + "dallas.museum", + "database.museum", + "ddr.museum", + "decorativearts.museum", + "delaware.museum", + "delmenhorst.museum", + "denmark.museum", + "depot.museum", + "design.museum", + "detroit.museum", + 
"dinosaur.museum", + "discovery.museum", + "dolls.museum", + "donostia.museum", + "durham.museum", + "eastafrica.museum", + "eastcoast.museum", + "education.museum", + "educational.museum", + "egyptian.museum", + "eisenbahn.museum", + "elburg.museum", + "elvendrell.museum", + "embroidery.museum", + "encyclopedic.museum", + "england.museum", + "entomology.museum", + "environment.museum", + "environmentalconservation.museum", + "epilepsy.museum", + "essex.museum", + "estate.museum", + "ethnology.museum", + "exeter.museum", + "exhibition.museum", + "family.museum", + "farm.museum", + "farmequipment.museum", + "farmers.museum", + "farmstead.museum", + "field.museum", + "figueres.museum", + "filatelia.museum", + "film.museum", + "fineart.museum", + "finearts.museum", + "finland.museum", + "flanders.museum", + "florida.museum", + "force.museum", + "fortmissoula.museum", + "fortworth.museum", + "foundation.museum", + "francaise.museum", + "frankfurt.museum", + "franziskaner.museum", + "freemasonry.museum", + "freiburg.museum", + "fribourg.museum", + "frog.museum", + "fundacio.museum", + "furniture.museum", + "gallery.museum", + "garden.museum", + "gateway.museum", + "geelvinck.museum", + "gemological.museum", + "geology.museum", + "georgia.museum", + "giessen.museum", + "glas.museum", + "glass.museum", + "gorge.museum", + "grandrapids.museum", + "graz.museum", + "guernsey.museum", + "halloffame.museum", + "hamburg.museum", + "handson.museum", + "harvestcelebration.museum", + "hawaii.museum", + "health.museum", + "heimatunduhren.museum", + "hellas.museum", + "helsinki.museum", + "hembygdsforbund.museum", + "heritage.museum", + "histoire.museum", + "historical.museum", + "historicalsociety.museum", + "historichouses.museum", + "historisch.museum", + "historisches.museum", + "history.museum", + "historyofscience.museum", + "horology.museum", + "house.museum", + "humanities.museum", + "illustration.museum", + "imageandsound.museum", + "indian.museum", + "indiana.museum", + "indianapolis.museum", + "indianmarket.museum", + "intelligence.museum", + "interactive.museum", + "iraq.museum", + "iron.museum", + "isleofman.museum", + "jamison.museum", + "jefferson.museum", + "jerusalem.museum", + "jewelry.museum", + "jewish.museum", + "jewishart.museum", + "jfk.museum", + "journalism.museum", + "judaica.museum", + "judygarland.museum", + "juedisches.museum", + "juif.museum", + "karate.museum", + "karikatur.museum", + "kids.museum", + "koebenhavn.museum", + "koeln.museum", + "kunst.museum", + "kunstsammlung.museum", + "kunstunddesign.museum", + "labor.museum", + "labour.museum", + "lajolla.museum", + "lancashire.museum", + "landes.museum", + "lans.museum", + "xn--lns-qla.museum", + "larsson.museum", + "lewismiller.museum", + "lincoln.museum", + "linz.museum", + "living.museum", + "livinghistory.museum", + "localhistory.museum", + "london.museum", + "losangeles.museum", + "louvre.museum", + "loyalist.museum", + "lucerne.museum", + "luxembourg.museum", + "luzern.museum", + "mad.museum", + "madrid.museum", + "mallorca.museum", + "manchester.museum", + "mansion.museum", + "mansions.museum", + "manx.museum", + "marburg.museum", + "maritime.museum", + "maritimo.museum", + "maryland.museum", + "marylhurst.museum", + "media.museum", + "medical.museum", + "medizinhistorisches.museum", + "meeres.museum", + "memorial.museum", + "mesaverde.museum", + "michigan.museum", + "midatlantic.museum", + "military.museum", + "mill.museum", + "miners.museum", + "mining.museum", + "minnesota.museum", + "missile.museum", + 
"missoula.museum", + "modern.museum", + "moma.museum", + "money.museum", + "monmouth.museum", + "monticello.museum", + "montreal.museum", + "moscow.museum", + "motorcycle.museum", + "muenchen.museum", + "muenster.museum", + "mulhouse.museum", + "muncie.museum", + "museet.museum", + "museumcenter.museum", + "museumvereniging.museum", + "music.museum", + "national.museum", + "nationalfirearms.museum", + "nationalheritage.museum", + "nativeamerican.museum", + "naturalhistory.museum", + "naturalhistorymuseum.museum", + "naturalsciences.museum", + "nature.museum", + "naturhistorisches.museum", + "natuurwetenschappen.museum", + "naumburg.museum", + "naval.museum", + "nebraska.museum", + "neues.museum", + "newhampshire.museum", + "newjersey.museum", + "newmexico.museum", + "newport.museum", + "newspaper.museum", + "newyork.museum", + "niepce.museum", + "norfolk.museum", + "north.museum", + "nrw.museum", + "nuernberg.museum", + "nuremberg.museum", + "nyc.museum", + "nyny.museum", + "oceanographic.museum", + "oceanographique.museum", + "omaha.museum", + "online.museum", + "ontario.museum", + "openair.museum", + "oregon.museum", + "oregontrail.museum", + "otago.museum", + "oxford.museum", + "pacific.museum", + "paderborn.museum", + "palace.museum", + "paleo.museum", + "palmsprings.museum", + "panama.museum", + "paris.museum", + "pasadena.museum", + "pharmacy.museum", + "philadelphia.museum", + "philadelphiaarea.museum", + "philately.museum", + "phoenix.museum", + "photography.museum", + "pilots.museum", + "pittsburgh.museum", + "planetarium.museum", + "plantation.museum", + "plants.museum", + "plaza.museum", + "portal.museum", + "portland.museum", + "portlligat.museum", + "posts-and-telecommunications.museum", + "preservation.museum", + "presidio.museum", + "press.museum", + "project.museum", + "public.museum", + "pubol.museum", + "quebec.museum", + "railroad.museum", + "railway.museum", + "research.museum", + "resistance.museum", + "riodejaneiro.museum", + "rochester.museum", + "rockart.museum", + "roma.museum", + "russia.museum", + "saintlouis.museum", + "salem.museum", + "salvadordali.museum", + "salzburg.museum", + "sandiego.museum", + "sanfrancisco.museum", + "santabarbara.museum", + "santacruz.museum", + "santafe.museum", + "saskatchewan.museum", + "satx.museum", + "savannahga.museum", + "schlesisches.museum", + "schoenbrunn.museum", + "schokoladen.museum", + "school.museum", + "schweiz.museum", + "science.museum", + "scienceandhistory.museum", + "scienceandindustry.museum", + "sciencecenter.museum", + "sciencecenters.museum", + "science-fiction.museum", + "sciencehistory.museum", + "sciences.museum", + "sciencesnaturelles.museum", + "scotland.museum", + "seaport.museum", + "settlement.museum", + "settlers.museum", + "shell.museum", + "sherbrooke.museum", + "sibenik.museum", + "silk.museum", + "ski.museum", + "skole.museum", + "society.museum", + "sologne.museum", + "soundandvision.museum", + "southcarolina.museum", + "southwest.museum", + "space.museum", + "spy.museum", + "square.museum", + "stadt.museum", + "stalbans.museum", + "starnberg.museum", + "state.museum", + "stateofdelaware.museum", + "station.museum", + "steam.museum", + "steiermark.museum", + "stjohn.museum", + "stockholm.museum", + "stpetersburg.museum", + "stuttgart.museum", + "suisse.museum", + "surgeonshall.museum", + "surrey.museum", + "svizzera.museum", + "sweden.museum", + "sydney.museum", + "tank.museum", + "tcm.museum", + "technology.museum", + "telekommunikation.museum", + "television.museum", + "texas.museum", + 
"textile.museum", + "theater.museum", + "time.museum", + "timekeeping.museum", + "topology.museum", + "torino.museum", + "touch.museum", + "town.museum", + "transport.museum", + "tree.museum", + "trolley.museum", + "trust.museum", + "trustee.museum", + "uhren.museum", + "ulm.museum", + "undersea.museum", + "university.museum", + "usa.museum", + "usantiques.museum", + "usarts.museum", + "uscountryestate.museum", + "usculture.museum", + "usdecorativearts.museum", + "usgarden.museum", + "ushistory.museum", + "ushuaia.museum", + "uslivinghistory.museum", + "utah.museum", + "uvic.museum", + "valley.museum", + "vantaa.museum", + "versailles.museum", + "viking.museum", + "village.museum", + "virginia.museum", + "virtual.museum", + "virtuel.museum", + "vlaanderen.museum", + "volkenkunde.museum", + "wales.museum", + "wallonie.museum", + "war.museum", + "washingtondc.museum", + "watchandclock.museum", + "watch-and-clock.museum", + "western.museum", + "westfalen.museum", + "whaling.museum", + "wildlife.museum", + "williamsburg.museum", + "windmill.museum", + "workshop.museum", + "york.museum", + "yorkshire.museum", + "yosemite.museum", + "youth.museum", + "zoological.museum", + "zoology.museum", + "xn--9dbhblg6di.museum", + "xn--h1aegh.museum", + "mv", + "aero.mv", + "biz.mv", + "com.mv", + "coop.mv", + "edu.mv", + "gov.mv", + "info.mv", + "int.mv", + "mil.mv", + "museum.mv", + "name.mv", + "net.mv", + "org.mv", + "pro.mv", + "mw", + "ac.mw", + "biz.mw", + "co.mw", + "com.mw", + "coop.mw", + "edu.mw", + "gov.mw", + "int.mw", + "museum.mw", + "net.mw", + "org.mw", + "mx", + "com.mx", + "org.mx", + "gob.mx", + "edu.mx", + "net.mx", + "my", + "com.my", + "net.my", + "org.my", + "gov.my", + "edu.my", + "mil.my", + "name.my", + "mz", + "ac.mz", + "adv.mz", + "co.mz", + "edu.mz", + "gov.mz", + "mil.mz", + "net.mz", + "org.mz", + "na", + "info.na", + "pro.na", + "name.na", + "school.na", + "or.na", + "dr.na", + "us.na", + "mx.na", + "ca.na", + "in.na", + "cc.na", + "tv.na", + "ws.na", + "mobi.na", + "co.na", + "com.na", + "org.na", + "name", + "nc", + "asso.nc", + "nom.nc", + "ne", + "net", + "nf", + "com.nf", + "net.nf", + "per.nf", + "rec.nf", + "web.nf", + "arts.nf", + "firm.nf", + "info.nf", + "other.nf", + "store.nf", + "ng", + "com.ng", + "edu.ng", + "gov.ng", + "i.ng", + "mil.ng", + "mobi.ng", + "name.ng", + "net.ng", + "org.ng", + "sch.ng", + "ni", + "ac.ni", + "biz.ni", + "co.ni", + "com.ni", + "edu.ni", + "gob.ni", + "in.ni", + "info.ni", + "int.ni", + "mil.ni", + "net.ni", + "nom.ni", + "org.ni", + "web.ni", + "nl", + "bv.nl", + "no", + "fhs.no", + "vgs.no", + "fylkesbibl.no", + "folkebibl.no", + "museum.no", + "idrett.no", + "priv.no", + "mil.no", + "stat.no", + "dep.no", + "kommune.no", + "herad.no", + "aa.no", + "ah.no", + "bu.no", + "fm.no", + "hl.no", + "hm.no", + "jan-mayen.no", + "mr.no", + "nl.no", + "nt.no", + "of.no", + "ol.no", + "oslo.no", + "rl.no", + "sf.no", + "st.no", + "svalbard.no", + "tm.no", + "tr.no", + "va.no", + "vf.no", + "gs.aa.no", + "gs.ah.no", + "gs.bu.no", + "gs.fm.no", + "gs.hl.no", + "gs.hm.no", + "gs.jan-mayen.no", + "gs.mr.no", + "gs.nl.no", + "gs.nt.no", + "gs.of.no", + "gs.ol.no", + "gs.oslo.no", + "gs.rl.no", + "gs.sf.no", + "gs.st.no", + "gs.svalbard.no", + "gs.tm.no", + "gs.tr.no", + "gs.va.no", + "gs.vf.no", + "akrehamn.no", + "xn--krehamn-dxa.no", + "algard.no", + "xn--lgrd-poac.no", + "arna.no", + "brumunddal.no", + "bryne.no", + "bronnoysund.no", + "xn--brnnysund-m8ac.no", + "drobak.no", + "xn--drbak-wua.no", + "egersund.no", + "fetsund.no", + "floro.no", 
+ "xn--flor-jra.no", + "fredrikstad.no", + "hokksund.no", + "honefoss.no", + "xn--hnefoss-q1a.no", + "jessheim.no", + "jorpeland.no", + "xn--jrpeland-54a.no", + "kirkenes.no", + "kopervik.no", + "krokstadelva.no", + "langevag.no", + "xn--langevg-jxa.no", + "leirvik.no", + "mjondalen.no", + "xn--mjndalen-64a.no", + "mo-i-rana.no", + "mosjoen.no", + "xn--mosjen-eya.no", + "nesoddtangen.no", + "orkanger.no", + "osoyro.no", + "xn--osyro-wua.no", + "raholt.no", + "xn--rholt-mra.no", + "sandnessjoen.no", + "xn--sandnessjen-ogb.no", + "skedsmokorset.no", + "slattum.no", + "spjelkavik.no", + "stathelle.no", + "stavern.no", + "stjordalshalsen.no", + "xn--stjrdalshalsen-sqb.no", + "tananger.no", + "tranby.no", + "vossevangen.no", + "afjord.no", + "xn--fjord-lra.no", + "agdenes.no", + "al.no", + "xn--l-1fa.no", + "alesund.no", + "xn--lesund-hua.no", + "alstahaug.no", + "alta.no", + "xn--lt-liac.no", + "alaheadju.no", + "xn--laheadju-7ya.no", + "alvdal.no", + "amli.no", + "xn--mli-tla.no", + "amot.no", + "xn--mot-tla.no", + "andebu.no", + "andoy.no", + "xn--andy-ira.no", + "andasuolo.no", + "ardal.no", + "xn--rdal-poa.no", + "aremark.no", + "arendal.no", + "xn--s-1fa.no", + "aseral.no", + "xn--seral-lra.no", + "asker.no", + "askim.no", + "askvoll.no", + "askoy.no", + "xn--asky-ira.no", + "asnes.no", + "xn--snes-poa.no", + "audnedaln.no", + "aukra.no", + "aure.no", + "aurland.no", + "aurskog-holand.no", + "xn--aurskog-hland-jnb.no", + "austevoll.no", + "austrheim.no", + "averoy.no", + "xn--avery-yua.no", + "balestrand.no", + "ballangen.no", + "balat.no", + "xn--blt-elab.no", + "balsfjord.no", + "bahccavuotna.no", + "xn--bhccavuotna-k7a.no", + "bamble.no", + "bardu.no", + "beardu.no", + "beiarn.no", + "bajddar.no", + "xn--bjddar-pta.no", + "baidar.no", + "xn--bidr-5nac.no", + "berg.no", + "bergen.no", + "berlevag.no", + "xn--berlevg-jxa.no", + "bearalvahki.no", + "xn--bearalvhki-y4a.no", + "bindal.no", + "birkenes.no", + "bjarkoy.no", + "xn--bjarky-fya.no", + "bjerkreim.no", + "bjugn.no", + "bodo.no", + "xn--bod-2na.no", + "badaddja.no", + "xn--bdddj-mrabd.no", + "budejju.no", + "bokn.no", + "bremanger.no", + "bronnoy.no", + "xn--brnny-wuac.no", + "bygland.no", + "bykle.no", + "barum.no", + "xn--brum-voa.no", + "bo.telemark.no", + "xn--b-5ga.telemark.no", + "bo.nordland.no", + "xn--b-5ga.nordland.no", + "bievat.no", + "xn--bievt-0qa.no", + "bomlo.no", + "xn--bmlo-gra.no", + "batsfjord.no", + "xn--btsfjord-9za.no", + "bahcavuotna.no", + "xn--bhcavuotna-s4a.no", + "dovre.no", + "drammen.no", + "drangedal.no", + "dyroy.no", + "xn--dyry-ira.no", + "donna.no", + "xn--dnna-gra.no", + "eid.no", + "eidfjord.no", + "eidsberg.no", + "eidskog.no", + "eidsvoll.no", + "eigersund.no", + "elverum.no", + "enebakk.no", + "engerdal.no", + "etne.no", + "etnedal.no", + "evenes.no", + "evenassi.no", + "xn--eveni-0qa01ga.no", + "evje-og-hornnes.no", + "farsund.no", + "fauske.no", + "fuossko.no", + "fuoisku.no", + "fedje.no", + "fet.no", + "finnoy.no", + "xn--finny-yua.no", + "fitjar.no", + "fjaler.no", + "fjell.no", + "flakstad.no", + "flatanger.no", + "flekkefjord.no", + "flesberg.no", + "flora.no", + "fla.no", + "xn--fl-zia.no", + "folldal.no", + "forsand.no", + "fosnes.no", + "frei.no", + "frogn.no", + "froland.no", + "frosta.no", + "frana.no", + "xn--frna-woa.no", + "froya.no", + "xn--frya-hra.no", + "fusa.no", + "fyresdal.no", + "forde.no", + "xn--frde-gra.no", + "gamvik.no", + "gangaviika.no", + "xn--ggaviika-8ya47h.no", + "gaular.no", + "gausdal.no", + "gildeskal.no", + "xn--gildeskl-g0a.no", + "giske.no", + 
"gjemnes.no", + "gjerdrum.no", + "gjerstad.no", + "gjesdal.no", + "gjovik.no", + "xn--gjvik-wua.no", + "gloppen.no", + "gol.no", + "gran.no", + "grane.no", + "granvin.no", + "gratangen.no", + "grimstad.no", + "grong.no", + "kraanghke.no", + "xn--kranghke-b0a.no", + "grue.no", + "gulen.no", + "hadsel.no", + "halden.no", + "halsa.no", + "hamar.no", + "hamaroy.no", + "habmer.no", + "xn--hbmer-xqa.no", + "hapmir.no", + "xn--hpmir-xqa.no", + "hammerfest.no", + "hammarfeasta.no", + "xn--hmmrfeasta-s4ac.no", + "haram.no", + "hareid.no", + "harstad.no", + "hasvik.no", + "aknoluokta.no", + "xn--koluokta-7ya57h.no", + "hattfjelldal.no", + "aarborte.no", + "haugesund.no", + "hemne.no", + "hemnes.no", + "hemsedal.no", + "heroy.more-og-romsdal.no", + "xn--hery-ira.xn--mre-og-romsdal-qqb.no", + "heroy.nordland.no", + "xn--hery-ira.nordland.no", + "hitra.no", + "hjartdal.no", + "hjelmeland.no", + "hobol.no", + "xn--hobl-ira.no", + "hof.no", + "hol.no", + "hole.no", + "holmestrand.no", + "holtalen.no", + "xn--holtlen-hxa.no", + "hornindal.no", + "horten.no", + "hurdal.no", + "hurum.no", + "hvaler.no", + "hyllestad.no", + "hagebostad.no", + "xn--hgebostad-g3a.no", + "hoyanger.no", + "xn--hyanger-q1a.no", + "hoylandet.no", + "xn--hylandet-54a.no", + "ha.no", + "xn--h-2fa.no", + "ibestad.no", + "inderoy.no", + "xn--indery-fya.no", + "iveland.no", + "jevnaker.no", + "jondal.no", + "jolster.no", + "xn--jlster-bya.no", + "karasjok.no", + "karasjohka.no", + "xn--krjohka-hwab49j.no", + "karlsoy.no", + "galsa.no", + "xn--gls-elac.no", + "karmoy.no", + "xn--karmy-yua.no", + "kautokeino.no", + "guovdageaidnu.no", + "klepp.no", + "klabu.no", + "xn--klbu-woa.no", + "kongsberg.no", + "kongsvinger.no", + "kragero.no", + "xn--krager-gya.no", + "kristiansand.no", + "kristiansund.no", + "krodsherad.no", + "xn--krdsherad-m8a.no", + "kvalsund.no", + "rahkkeravju.no", + "xn--rhkkervju-01af.no", + "kvam.no", + "kvinesdal.no", + "kvinnherad.no", + "kviteseid.no", + "kvitsoy.no", + "xn--kvitsy-fya.no", + "kvafjord.no", + "xn--kvfjord-nxa.no", + "giehtavuoatna.no", + "kvanangen.no", + "xn--kvnangen-k0a.no", + "navuotna.no", + "xn--nvuotna-hwa.no", + "kafjord.no", + "xn--kfjord-iua.no", + "gaivuotna.no", + "xn--givuotna-8ya.no", + "larvik.no", + "lavangen.no", + "lavagis.no", + "loabat.no", + "xn--loabt-0qa.no", + "lebesby.no", + "davvesiida.no", + "leikanger.no", + "leirfjord.no", + "leka.no", + "leksvik.no", + "lenvik.no", + "leangaviika.no", + "xn--leagaviika-52b.no", + "lesja.no", + "levanger.no", + "lier.no", + "lierne.no", + "lillehammer.no", + "lillesand.no", + "lindesnes.no", + "lindas.no", + "xn--linds-pra.no", + "lom.no", + "loppa.no", + "lahppi.no", + "xn--lhppi-xqa.no", + "lund.no", + "lunner.no", + "luroy.no", + "xn--lury-ira.no", + "luster.no", + "lyngdal.no", + "lyngen.no", + "ivgu.no", + "lardal.no", + "lerdal.no", + "xn--lrdal-sra.no", + "lodingen.no", + "xn--ldingen-q1a.no", + "lorenskog.no", + "xn--lrenskog-54a.no", + "loten.no", + "xn--lten-gra.no", + "malvik.no", + "masoy.no", + "xn--msy-ula0h.no", + "muosat.no", + "xn--muost-0qa.no", + "mandal.no", + "marker.no", + "marnardal.no", + "masfjorden.no", + "meland.no", + "meldal.no", + "melhus.no", + "meloy.no", + "xn--mely-ira.no", + "meraker.no", + "xn--merker-kua.no", + "moareke.no", + "xn--moreke-jua.no", + "midsund.no", + "midtre-gauldal.no", + "modalen.no", + "modum.no", + "molde.no", + "moskenes.no", + "moss.no", + "mosvik.no", + "malselv.no", + "xn--mlselv-iua.no", + "malatvuopmi.no", + "xn--mlatvuopmi-s4a.no", + "namdalseid.no", + "aejrie.no", + 
"namsos.no", + "namsskogan.no", + "naamesjevuemie.no", + "xn--nmesjevuemie-tcba.no", + "laakesvuemie.no", + "nannestad.no", + "narvik.no", + "narviika.no", + "naustdal.no", + "nedre-eiker.no", + "nes.akershus.no", + "nes.buskerud.no", + "nesna.no", + "nesodden.no", + "nesseby.no", + "unjarga.no", + "xn--unjrga-rta.no", + "nesset.no", + "nissedal.no", + "nittedal.no", + "nord-aurdal.no", + "nord-fron.no", + "nord-odal.no", + "norddal.no", + "nordkapp.no", + "davvenjarga.no", + "xn--davvenjrga-y4a.no", + "nordre-land.no", + "nordreisa.no", + "raisa.no", + "xn--risa-5na.no", + "nore-og-uvdal.no", + "notodden.no", + "naroy.no", + "xn--nry-yla5g.no", + "notteroy.no", + "xn--nttery-byae.no", + "odda.no", + "oksnes.no", + "xn--ksnes-uua.no", + "oppdal.no", + "oppegard.no", + "xn--oppegrd-ixa.no", + "orkdal.no", + "orland.no", + "xn--rland-uua.no", + "orskog.no", + "xn--rskog-uua.no", + "orsta.no", + "xn--rsta-fra.no", + "os.hedmark.no", + "os.hordaland.no", + "osen.no", + "osteroy.no", + "xn--ostery-fya.no", + "ostre-toten.no", + "xn--stre-toten-zcb.no", + "overhalla.no", + "ovre-eiker.no", + "xn--vre-eiker-k8a.no", + "oyer.no", + "xn--yer-zna.no", + "oygarden.no", + "xn--ygarden-p1a.no", + "oystre-slidre.no", + "xn--ystre-slidre-ujb.no", + "porsanger.no", + "porsangu.no", + "xn--porsgu-sta26f.no", + "porsgrunn.no", + "radoy.no", + "xn--rady-ira.no", + "rakkestad.no", + "rana.no", + "ruovat.no", + "randaberg.no", + "rauma.no", + "rendalen.no", + "rennebu.no", + "rennesoy.no", + "xn--rennesy-v1a.no", + "rindal.no", + "ringebu.no", + "ringerike.no", + "ringsaker.no", + "rissa.no", + "risor.no", + "xn--risr-ira.no", + "roan.no", + "rollag.no", + "rygge.no", + "ralingen.no", + "xn--rlingen-mxa.no", + "rodoy.no", + "xn--rdy-0nab.no", + "romskog.no", + "xn--rmskog-bya.no", + "roros.no", + "xn--rros-gra.no", + "rost.no", + "xn--rst-0na.no", + "royken.no", + "xn--ryken-vua.no", + "royrvik.no", + "xn--ryrvik-bya.no", + "rade.no", + "xn--rde-ula.no", + "salangen.no", + "siellak.no", + "saltdal.no", + "salat.no", + "xn--slt-elab.no", + "xn--slat-5na.no", + "samnanger.no", + "sande.more-og-romsdal.no", + "sande.xn--mre-og-romsdal-qqb.no", + "sande.vestfold.no", + "sandefjord.no", + "sandnes.no", + "sandoy.no", + "xn--sandy-yua.no", + "sarpsborg.no", + "sauda.no", + "sauherad.no", + "sel.no", + "selbu.no", + "selje.no", + "seljord.no", + "sigdal.no", + "siljan.no", + "sirdal.no", + "skaun.no", + "skedsmo.no", + "ski.no", + "skien.no", + "skiptvet.no", + "skjervoy.no", + "xn--skjervy-v1a.no", + "skierva.no", + "xn--skierv-uta.no", + "skjak.no", + "xn--skjk-soa.no", + "skodje.no", + "skanland.no", + "xn--sknland-fxa.no", + "skanit.no", + "xn--sknit-yqa.no", + "smola.no", + "xn--smla-hra.no", + "snillfjord.no", + "snasa.no", + "xn--snsa-roa.no", + "snoasa.no", + "snaase.no", + "xn--snase-nra.no", + "sogndal.no", + "sokndal.no", + "sola.no", + "solund.no", + "songdalen.no", + "sortland.no", + "spydeberg.no", + "stange.no", + "stavanger.no", + "steigen.no", + "steinkjer.no", + "stjordal.no", + "xn--stjrdal-s1a.no", + "stokke.no", + "stor-elvdal.no", + "stord.no", + "stordal.no", + "storfjord.no", + "omasvuotna.no", + "strand.no", + "stranda.no", + "stryn.no", + "sula.no", + "suldal.no", + "sund.no", + "sunndal.no", + "surnadal.no", + "sveio.no", + "svelvik.no", + "sykkylven.no", + "sogne.no", + "xn--sgne-gra.no", + "somna.no", + "xn--smna-gra.no", + "sondre-land.no", + "xn--sndre-land-0cb.no", + "sor-aurdal.no", + "xn--sr-aurdal-l8a.no", + "sor-fron.no", + "xn--sr-fron-q1a.no", + "sor-odal.no", + 
"xn--sr-odal-q1a.no", + "sor-varanger.no", + "xn--sr-varanger-ggb.no", + "matta-varjjat.no", + "xn--mtta-vrjjat-k7af.no", + "sorfold.no", + "xn--srfold-bya.no", + "sorreisa.no", + "xn--srreisa-q1a.no", + "sorum.no", + "xn--srum-gra.no", + "tana.no", + "deatnu.no", + "time.no", + "tingvoll.no", + "tinn.no", + "tjeldsund.no", + "dielddanuorri.no", + "tjome.no", + "xn--tjme-hra.no", + "tokke.no", + "tolga.no", + "torsken.no", + "tranoy.no", + "xn--trany-yua.no", + "tromso.no", + "xn--troms-zua.no", + "tromsa.no", + "romsa.no", + "trondheim.no", + "troandin.no", + "trysil.no", + "trana.no", + "xn--trna-woa.no", + "trogstad.no", + "xn--trgstad-r1a.no", + "tvedestrand.no", + "tydal.no", + "tynset.no", + "tysfjord.no", + "divtasvuodna.no", + "divttasvuotna.no", + "tysnes.no", + "tysvar.no", + "xn--tysvr-vra.no", + "tonsberg.no", + "xn--tnsberg-q1a.no", + "ullensaker.no", + "ullensvang.no", + "ulvik.no", + "utsira.no", + "vadso.no", + "xn--vads-jra.no", + "cahcesuolo.no", + "xn--hcesuolo-7ya35b.no", + "vaksdal.no", + "valle.no", + "vang.no", + "vanylven.no", + "vardo.no", + "xn--vard-jra.no", + "varggat.no", + "xn--vrggt-xqad.no", + "vefsn.no", + "vaapste.no", + "vega.no", + "vegarshei.no", + "xn--vegrshei-c0a.no", + "vennesla.no", + "verdal.no", + "verran.no", + "vestby.no", + "vestnes.no", + "vestre-slidre.no", + "vestre-toten.no", + "vestvagoy.no", + "xn--vestvgy-ixa6o.no", + "vevelstad.no", + "vik.no", + "vikna.no", + "vindafjord.no", + "volda.no", + "voss.no", + "varoy.no", + "xn--vry-yla5g.no", + "vagan.no", + "xn--vgan-qoa.no", + "voagat.no", + "vagsoy.no", + "xn--vgsy-qoa0j.no", + "vaga.no", + "xn--vg-yiab.no", + "valer.ostfold.no", + "xn--vler-qoa.xn--stfold-9xa.no", + "valer.hedmark.no", + "xn--vler-qoa.hedmark.no", + "*.np", + "nr", + "biz.nr", + "info.nr", + "gov.nr", + "edu.nr", + "org.nr", + "net.nr", + "com.nr", + "nu", + "nz", + "ac.nz", + "co.nz", + "cri.nz", + "geek.nz", + "gen.nz", + "govt.nz", + "health.nz", + "iwi.nz", + "kiwi.nz", + "maori.nz", + "mil.nz", + "xn--mori-qsa.nz", + "net.nz", + "org.nz", + "parliament.nz", + "school.nz", + "om", + "co.om", + "com.om", + "edu.om", + "gov.om", + "med.om", + "museum.om", + "net.om", + "org.om", + "pro.om", + "onion", + "org", + "pa", + "ac.pa", + "gob.pa", + "com.pa", + "org.pa", + "sld.pa", + "edu.pa", + "net.pa", + "ing.pa", + "abo.pa", + "med.pa", + "nom.pa", + "pe", + "edu.pe", + "gob.pe", + "nom.pe", + "mil.pe", + "org.pe", + "com.pe", + "net.pe", + "pf", + "com.pf", + "org.pf", + "edu.pf", + "*.pg", + "ph", + "com.ph", + "net.ph", + "org.ph", + "gov.ph", + "edu.ph", + "ngo.ph", + "mil.ph", + "i.ph", + "pk", + "com.pk", + "net.pk", + "edu.pk", + "org.pk", + "fam.pk", + "biz.pk", + "web.pk", + "gov.pk", + "gob.pk", + "gok.pk", + "gon.pk", + "gop.pk", + "gos.pk", + "info.pk", + "pl", + "com.pl", + "net.pl", + "org.pl", + "aid.pl", + "agro.pl", + "atm.pl", + "auto.pl", + "biz.pl", + "edu.pl", + "gmina.pl", + "gsm.pl", + "info.pl", + "mail.pl", + "miasta.pl", + "media.pl", + "mil.pl", + "nieruchomosci.pl", + "nom.pl", + "pc.pl", + "powiat.pl", + "priv.pl", + "realestate.pl", + "rel.pl", + "sex.pl", + "shop.pl", + "sklep.pl", + "sos.pl", + "szkola.pl", + "targi.pl", + "tm.pl", + "tourism.pl", + "travel.pl", + "turystyka.pl", + "gov.pl", + "ap.gov.pl", + "ic.gov.pl", + "is.gov.pl", + "us.gov.pl", + "kmpsp.gov.pl", + "kppsp.gov.pl", + "kwpsp.gov.pl", + "psp.gov.pl", + "wskr.gov.pl", + "kwp.gov.pl", + "mw.gov.pl", + "ug.gov.pl", + "um.gov.pl", + "umig.gov.pl", + "ugim.gov.pl", + "upow.gov.pl", + "uw.gov.pl", + "starostwo.gov.pl", + 
"pa.gov.pl", + "po.gov.pl", + "psse.gov.pl", + "pup.gov.pl", + "rzgw.gov.pl", + "sa.gov.pl", + "so.gov.pl", + "sr.gov.pl", + "wsa.gov.pl", + "sko.gov.pl", + "uzs.gov.pl", + "wiih.gov.pl", + "winb.gov.pl", + "pinb.gov.pl", + "wios.gov.pl", + "witd.gov.pl", + "wzmiuw.gov.pl", + "piw.gov.pl", + "wiw.gov.pl", + "griw.gov.pl", + "wif.gov.pl", + "oum.gov.pl", + "sdn.gov.pl", + "zp.gov.pl", + "uppo.gov.pl", + "mup.gov.pl", + "wuoz.gov.pl", + "konsulat.gov.pl", + "oirm.gov.pl", + "augustow.pl", + "babia-gora.pl", + "bedzin.pl", + "beskidy.pl", + "bialowieza.pl", + "bialystok.pl", + "bielawa.pl", + "bieszczady.pl", + "boleslawiec.pl", + "bydgoszcz.pl", + "bytom.pl", + "cieszyn.pl", + "czeladz.pl", + "czest.pl", + "dlugoleka.pl", + "elblag.pl", + "elk.pl", + "glogow.pl", + "gniezno.pl", + "gorlice.pl", + "grajewo.pl", + "ilawa.pl", + "jaworzno.pl", + "jelenia-gora.pl", + "jgora.pl", + "kalisz.pl", + "kazimierz-dolny.pl", + "karpacz.pl", + "kartuzy.pl", + "kaszuby.pl", + "katowice.pl", + "kepno.pl", + "ketrzyn.pl", + "klodzko.pl", + "kobierzyce.pl", + "kolobrzeg.pl", + "konin.pl", + "konskowola.pl", + "kutno.pl", + "lapy.pl", + "lebork.pl", + "legnica.pl", + "lezajsk.pl", + "limanowa.pl", + "lomza.pl", + "lowicz.pl", + "lubin.pl", + "lukow.pl", + "malbork.pl", + "malopolska.pl", + "mazowsze.pl", + "mazury.pl", + "mielec.pl", + "mielno.pl", + "mragowo.pl", + "naklo.pl", + "nowaruda.pl", + "nysa.pl", + "olawa.pl", + "olecko.pl", + "olkusz.pl", + "olsztyn.pl", + "opoczno.pl", + "opole.pl", + "ostroda.pl", + "ostroleka.pl", + "ostrowiec.pl", + "ostrowwlkp.pl", + "pila.pl", + "pisz.pl", + "podhale.pl", + "podlasie.pl", + "polkowice.pl", + "pomorze.pl", + "pomorskie.pl", + "prochowice.pl", + "pruszkow.pl", + "przeworsk.pl", + "pulawy.pl", + "radom.pl", + "rawa-maz.pl", + "rybnik.pl", + "rzeszow.pl", + "sanok.pl", + "sejny.pl", + "slask.pl", + "slupsk.pl", + "sosnowiec.pl", + "stalowa-wola.pl", + "skoczow.pl", + "starachowice.pl", + "stargard.pl", + "suwalki.pl", + "swidnica.pl", + "swiebodzin.pl", + "swinoujscie.pl", + "szczecin.pl", + "szczytno.pl", + "tarnobrzeg.pl", + "tgory.pl", + "turek.pl", + "tychy.pl", + "ustka.pl", + "walbrzych.pl", + "warmia.pl", + "warszawa.pl", + "waw.pl", + "wegrow.pl", + "wielun.pl", + "wlocl.pl", + "wloclawek.pl", + "wodzislaw.pl", + "wolomin.pl", + "wroclaw.pl", + "zachpomor.pl", + "zagan.pl", + "zarow.pl", + "zgora.pl", + "zgorzelec.pl", + "pm", + "pn", + "gov.pn", + "co.pn", + "org.pn", + "edu.pn", + "net.pn", + "post", + "pr", + "com.pr", + "net.pr", + "org.pr", + "gov.pr", + "edu.pr", + "isla.pr", + "pro.pr", + "biz.pr", + "info.pr", + "name.pr", + "est.pr", + "prof.pr", + "ac.pr", + "pro", + "aaa.pro", + "aca.pro", + "acct.pro", + "avocat.pro", + "bar.pro", + "cpa.pro", + "eng.pro", + "jur.pro", + "law.pro", + "med.pro", + "recht.pro", + "ps", + "edu.ps", + "gov.ps", + "sec.ps", + "plo.ps", + "com.ps", + "org.ps", + "net.ps", + "pt", + "net.pt", + "gov.pt", + "org.pt", + "edu.pt", + "int.pt", + "publ.pt", + "com.pt", + "nome.pt", + "pw", + "co.pw", + "ne.pw", + "or.pw", + "ed.pw", + "go.pw", + "belau.pw", + "py", + "com.py", + "coop.py", + "edu.py", + "gov.py", + "mil.py", + "net.py", + "org.py", + "qa", + "com.qa", + "edu.qa", + "gov.qa", + "mil.qa", + "name.qa", + "net.qa", + "org.qa", + "sch.qa", + "re", + "asso.re", + "com.re", + "nom.re", + "ro", + "arts.ro", + "com.ro", + "firm.ro", + "info.ro", + "nom.ro", + "nt.ro", + "org.ro", + "rec.ro", + "store.ro", + "tm.ro", + "www.ro", + "rs", + "ac.rs", + "co.rs", + "edu.rs", + "gov.rs", + "in.rs", + "org.rs", + "ru", + 
"ac.ru", + "edu.ru", + "gov.ru", + "int.ru", + "mil.ru", + "test.ru", + "rw", + "gov.rw", + "net.rw", + "edu.rw", + "ac.rw", + "com.rw", + "co.rw", + "int.rw", + "mil.rw", + "gouv.rw", + "sa", + "com.sa", + "net.sa", + "org.sa", + "gov.sa", + "med.sa", + "pub.sa", + "edu.sa", + "sch.sa", + "sb", + "com.sb", + "edu.sb", + "gov.sb", + "net.sb", + "org.sb", + "sc", + "com.sc", + "gov.sc", + "net.sc", + "org.sc", + "edu.sc", + "sd", + "com.sd", + "net.sd", + "org.sd", + "edu.sd", + "med.sd", + "tv.sd", + "gov.sd", + "info.sd", + "se", + "a.se", + "ac.se", + "b.se", + "bd.se", + "brand.se", + "c.se", + "d.se", + "e.se", + "f.se", + "fh.se", + "fhsk.se", + "fhv.se", + "g.se", + "h.se", + "i.se", + "k.se", + "komforb.se", + "kommunalforbund.se", + "komvux.se", + "l.se", + "lanbib.se", + "m.se", + "n.se", + "naturbruksgymn.se", + "o.se", + "org.se", + "p.se", + "parti.se", + "pp.se", + "press.se", + "r.se", + "s.se", + "t.se", + "tm.se", + "u.se", + "w.se", + "x.se", + "y.se", + "z.se", + "sg", + "com.sg", + "net.sg", + "org.sg", + "gov.sg", + "edu.sg", + "per.sg", + "sh", + "com.sh", + "net.sh", + "gov.sh", + "org.sh", + "mil.sh", + "si", + "sj", + "sk", + "sl", + "com.sl", + "net.sl", + "edu.sl", + "gov.sl", + "org.sl", + "sm", + "sn", + "art.sn", + "com.sn", + "edu.sn", + "gouv.sn", + "org.sn", + "perso.sn", + "univ.sn", + "so", + "com.so", + "net.so", + "org.so", + "sr", + "st", + "co.st", + "com.st", + "consulado.st", + "edu.st", + "embaixada.st", + "gov.st", + "mil.st", + "net.st", + "org.st", + "principe.st", + "saotome.st", + "store.st", + "su", + "sv", + "com.sv", + "edu.sv", + "gob.sv", + "org.sv", + "red.sv", + "sx", + "gov.sx", + "sy", + "edu.sy", + "gov.sy", + "net.sy", + "mil.sy", + "com.sy", + "org.sy", + "sz", + "co.sz", + "ac.sz", + "org.sz", + "tc", + "td", + "tel", + "tf", + "tg", + "th", + "ac.th", + "co.th", + "go.th", + "in.th", + "mi.th", + "net.th", + "or.th", + "tj", + "ac.tj", + "biz.tj", + "co.tj", + "com.tj", + "edu.tj", + "go.tj", + "gov.tj", + "int.tj", + "mil.tj", + "name.tj", + "net.tj", + "nic.tj", + "org.tj", + "test.tj", + "web.tj", + "tk", + "tl", + "gov.tl", + "tm", + "com.tm", + "co.tm", + "org.tm", + "net.tm", + "nom.tm", + "gov.tm", + "mil.tm", + "edu.tm", + "tn", + "com.tn", + "ens.tn", + "fin.tn", + "gov.tn", + "ind.tn", + "intl.tn", + "nat.tn", + "net.tn", + "org.tn", + "info.tn", + "perso.tn", + "tourism.tn", + "edunet.tn", + "rnrt.tn", + "rns.tn", + "rnu.tn", + "mincom.tn", + "agrinet.tn", + "defense.tn", + "turen.tn", + "to", + "com.to", + "gov.to", + "net.to", + "org.to", + "edu.to", + "mil.to", + "tr", + "com.tr", + "info.tr", + "biz.tr", + "net.tr", + "org.tr", + "web.tr", + "gen.tr", + "tv.tr", + "av.tr", + "dr.tr", + "bbs.tr", + "name.tr", + "tel.tr", + "gov.tr", + "bel.tr", + "pol.tr", + "mil.tr", + "k12.tr", + "edu.tr", + "kep.tr", + "nc.tr", + "gov.nc.tr", + "travel", + "tt", + "co.tt", + "com.tt", + "org.tt", + "net.tt", + "biz.tt", + "info.tt", + "pro.tt", + "int.tt", + "coop.tt", + "jobs.tt", + "mobi.tt", + "travel.tt", + "museum.tt", + "aero.tt", + "name.tt", + "gov.tt", + "edu.tt", + "tv", + "tw", + "edu.tw", + "gov.tw", + "mil.tw", + "com.tw", + "net.tw", + "org.tw", + "idv.tw", + "game.tw", + "ebiz.tw", + "club.tw", + "xn--zf0ao64a.tw", + "xn--uc0atv.tw", + "xn--czrw28b.tw", + "tz", + "ac.tz", + "co.tz", + "go.tz", + "hotel.tz", + "info.tz", + "me.tz", + "mil.tz", + "mobi.tz", + "ne.tz", + "or.tz", + "sc.tz", + "tv.tz", + "ua", + "com.ua", + "edu.ua", + "gov.ua", + "in.ua", + "net.ua", + "org.ua", + "cherkassy.ua", + "cherkasy.ua", + 
"chernigov.ua", + "chernihiv.ua", + "chernivtsi.ua", + "chernovtsy.ua", + "ck.ua", + "cn.ua", + "cr.ua", + "crimea.ua", + "cv.ua", + "dn.ua", + "dnepropetrovsk.ua", + "dnipropetrovsk.ua", + "dominic.ua", + "donetsk.ua", + "dp.ua", + "if.ua", + "ivano-frankivsk.ua", + "kh.ua", + "kharkiv.ua", + "kharkov.ua", + "kherson.ua", + "khmelnitskiy.ua", + "khmelnytskyi.ua", + "kiev.ua", + "kirovograd.ua", + "km.ua", + "kr.ua", + "krym.ua", + "ks.ua", + "kv.ua", + "kyiv.ua", + "lg.ua", + "lt.ua", + "lugansk.ua", + "lutsk.ua", + "lv.ua", + "lviv.ua", + "mk.ua", + "mykolaiv.ua", + "nikolaev.ua", + "od.ua", + "odesa.ua", + "odessa.ua", + "pl.ua", + "poltava.ua", + "rivne.ua", + "rovno.ua", + "rv.ua", + "sb.ua", + "sebastopol.ua", + "sevastopol.ua", + "sm.ua", + "sumy.ua", + "te.ua", + "ternopil.ua", + "uz.ua", + "uzhgorod.ua", + "vinnica.ua", + "vinnytsia.ua", + "vn.ua", + "volyn.ua", + "yalta.ua", + "zaporizhzhe.ua", + "zaporizhzhia.ua", + "zhitomir.ua", + "zhytomyr.ua", + "zp.ua", + "zt.ua", + "ug", + "co.ug", + "or.ug", + "ac.ug", + "sc.ug", + "go.ug", + "ne.ug", + "com.ug", + "org.ug", + "uk", + "ac.uk", + "co.uk", + "gov.uk", + "ltd.uk", + "me.uk", + "net.uk", + "nhs.uk", + "org.uk", + "plc.uk", + "police.uk", + "*.sch.uk", + "us", + "dni.us", + "fed.us", + "isa.us", + "kids.us", + "nsn.us", + "ak.us", + "al.us", + "ar.us", + "as.us", + "az.us", + "ca.us", + "co.us", + "ct.us", + "dc.us", + "de.us", + "fl.us", + "ga.us", + "gu.us", + "hi.us", + "ia.us", + "id.us", + "il.us", + "in.us", + "ks.us", + "ky.us", + "la.us", + "ma.us", + "md.us", + "me.us", + "mi.us", + "mn.us", + "mo.us", + "ms.us", + "mt.us", + "nc.us", + "nd.us", + "ne.us", + "nh.us", + "nj.us", + "nm.us", + "nv.us", + "ny.us", + "oh.us", + "ok.us", + "or.us", + "pa.us", + "pr.us", + "ri.us", + "sc.us", + "sd.us", + "tn.us", + "tx.us", + "ut.us", + "vi.us", + "vt.us", + "va.us", + "wa.us", + "wi.us", + "wv.us", + "wy.us", + "k12.ak.us", + "k12.al.us", + "k12.ar.us", + "k12.as.us", + "k12.az.us", + "k12.ca.us", + "k12.co.us", + "k12.ct.us", + "k12.dc.us", + "k12.de.us", + "k12.fl.us", + "k12.ga.us", + "k12.gu.us", + "k12.ia.us", + "k12.id.us", + "k12.il.us", + "k12.in.us", + "k12.ks.us", + "k12.ky.us", + "k12.la.us", + "k12.ma.us", + "k12.md.us", + "k12.me.us", + "k12.mi.us", + "k12.mn.us", + "k12.mo.us", + "k12.ms.us", + "k12.mt.us", + "k12.nc.us", + "k12.ne.us", + "k12.nh.us", + "k12.nj.us", + "k12.nm.us", + "k12.nv.us", + "k12.ny.us", + "k12.oh.us", + "k12.ok.us", + "k12.or.us", + "k12.pa.us", + "k12.pr.us", + "k12.ri.us", + "k12.sc.us", + "k12.tn.us", + "k12.tx.us", + "k12.ut.us", + "k12.vi.us", + "k12.vt.us", + "k12.va.us", + "k12.wa.us", + "k12.wi.us", + "k12.wy.us", + "cc.ak.us", + "cc.al.us", + "cc.ar.us", + "cc.as.us", + "cc.az.us", + "cc.ca.us", + "cc.co.us", + "cc.ct.us", + "cc.dc.us", + "cc.de.us", + "cc.fl.us", + "cc.ga.us", + "cc.gu.us", + "cc.hi.us", + "cc.ia.us", + "cc.id.us", + "cc.il.us", + "cc.in.us", + "cc.ks.us", + "cc.ky.us", + "cc.la.us", + "cc.ma.us", + "cc.md.us", + "cc.me.us", + "cc.mi.us", + "cc.mn.us", + "cc.mo.us", + "cc.ms.us", + "cc.mt.us", + "cc.nc.us", + "cc.nd.us", + "cc.ne.us", + "cc.nh.us", + "cc.nj.us", + "cc.nm.us", + "cc.nv.us", + "cc.ny.us", + "cc.oh.us", + "cc.ok.us", + "cc.or.us", + "cc.pa.us", + "cc.pr.us", + "cc.ri.us", + "cc.sc.us", + "cc.sd.us", + "cc.tn.us", + "cc.tx.us", + "cc.ut.us", + "cc.vi.us", + "cc.vt.us", + "cc.va.us", + "cc.wa.us", + "cc.wi.us", + "cc.wv.us", + "cc.wy.us", + "lib.ak.us", + "lib.al.us", + "lib.ar.us", + "lib.as.us", + "lib.az.us", + "lib.ca.us", + "lib.co.us", + 
"lib.ct.us", + "lib.dc.us", + "lib.fl.us", + "lib.ga.us", + "lib.gu.us", + "lib.hi.us", + "lib.ia.us", + "lib.id.us", + "lib.il.us", + "lib.in.us", + "lib.ks.us", + "lib.ky.us", + "lib.la.us", + "lib.ma.us", + "lib.md.us", + "lib.me.us", + "lib.mi.us", + "lib.mn.us", + "lib.mo.us", + "lib.ms.us", + "lib.mt.us", + "lib.nc.us", + "lib.nd.us", + "lib.ne.us", + "lib.nh.us", + "lib.nj.us", + "lib.nm.us", + "lib.nv.us", + "lib.ny.us", + "lib.oh.us", + "lib.ok.us", + "lib.or.us", + "lib.pa.us", + "lib.pr.us", + "lib.ri.us", + "lib.sc.us", + "lib.sd.us", + "lib.tn.us", + "lib.tx.us", + "lib.ut.us", + "lib.vi.us", + "lib.vt.us", + "lib.va.us", + "lib.wa.us", + "lib.wi.us", + "lib.wy.us", + "pvt.k12.ma.us", + "chtr.k12.ma.us", + "paroch.k12.ma.us", + "ann-arbor.mi.us", + "cog.mi.us", + "dst.mi.us", + "eaton.mi.us", + "gen.mi.us", + "mus.mi.us", + "tec.mi.us", + "washtenaw.mi.us", + "uy", + "com.uy", + "edu.uy", + "gub.uy", + "mil.uy", + "net.uy", + "org.uy", + "uz", + "co.uz", + "com.uz", + "net.uz", + "org.uz", + "va", + "vc", + "com.vc", + "net.vc", + "org.vc", + "gov.vc", + "mil.vc", + "edu.vc", + "ve", + "arts.ve", + "co.ve", + "com.ve", + "e12.ve", + "edu.ve", + "firm.ve", + "gob.ve", + "gov.ve", + "info.ve", + "int.ve", + "mil.ve", + "net.ve", + "org.ve", + "rec.ve", + "store.ve", + "tec.ve", + "web.ve", + "vg", + "vi", + "co.vi", + "com.vi", + "k12.vi", + "net.vi", + "org.vi", + "vn", + "com.vn", + "net.vn", + "org.vn", + "edu.vn", + "gov.vn", + "int.vn", + "ac.vn", + "biz.vn", + "info.vn", + "name.vn", + "pro.vn", + "health.vn", + "vu", + "com.vu", + "edu.vu", + "net.vu", + "org.vu", + "wf", + "ws", + "com.ws", + "net.ws", + "org.ws", + "gov.ws", + "edu.ws", + "yt", + "xn--mgbaam7a8h", + "xn--y9a3aq", + "xn--54b7fta0cc", + "xn--90ae", + "xn--90ais", + "xn--fiqs8s", + "xn--fiqz9s", + "xn--lgbbat1ad8j", + "xn--wgbh1c", + "xn--e1a4c", + "xn--node", + "xn--qxam", + "xn--j6w193g", + "xn--2scrj9c", + "xn--3hcrj9c", + "xn--45br5cyl", + "xn--h2breg3eve", + "xn--h2brj9c8c", + "xn--mgbgu82a", + "xn--rvc1e0am3e", + "xn--h2brj9c", + "xn--mgbbh1a71e", + "xn--fpcrj9c3d", + "xn--gecrj9c", + "xn--s9brj9c", + "xn--45brj9c", + "xn--xkc2dl3a5ee0h", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "xn--mgbtx2b", + "xn--mgbayh7gpa", + "xn--3e0b707e", + "xn--80ao21a", + "xn--fzc2c9e2c", + "xn--xkc2al3hye2a", + "xn--mgbc0a9azcg", + "xn--d1alf", + "xn--l1acc", + "xn--mix891f", + "xn--mix082f", + "xn--mgbx4cd0ab", + "xn--mgb9awbf", + "xn--mgbai9azgqp6j", + "xn--mgbai9a5eva00b", + "xn--ygbi2ammx", + "xn--90a3ac", + "xn--o1ac.xn--90a3ac", + "xn--c1avg.xn--90a3ac", + "xn--90azh.xn--90a3ac", + "xn--d1at.xn--90a3ac", + "xn--o1ach.xn--90a3ac", + "xn--80au.xn--90a3ac", + "xn--p1ai", + "xn--wgbl6a", + "xn--mgberp4a5d4ar", + "xn--mgberp4a5d4a87g", + "xn--mgbqly7c0a67fbc", + "xn--mgbqly7cvafr", + "xn--mgbpl2fh", + "xn--yfro4i67o", + "xn--clchc0ea0b2g2a9gcd", + "xn--ogbpf8fl", + "xn--mgbtf8fl", + "xn--o3cw4h", + "xn--12c1fe0br.xn--o3cw4h", + "xn--12co0c3b4eva.xn--o3cw4h", + "xn--h3cuzk1di.xn--o3cw4h", + "xn--o3cyx2a.xn--o3cw4h", + "xn--m3ch0j3a.xn--o3cw4h", + "xn--12cfi8ixb8l.xn--o3cw4h", + "xn--pgbs0dh", + "xn--kpry57d", + "xn--kprw13d", + "xn--nnx388a", + "xn--j1amh", + "xn--mgb2ddes", + "xxx", + "*.ye", + "ac.za", + "agric.za", + "alt.za", + "co.za", + "edu.za", + "gov.za", + "grondar.za", + "law.za", + "mil.za", + "net.za", + "ngo.za", + "nis.za", + "nom.za", + "org.za", + "school.za", + "tm.za", + "web.za", + "zm", + "ac.zm", + "biz.zm", + "co.zm", + "com.zm", + "edu.zm", + "gov.zm", + "info.zm", + "mil.zm", + "net.zm", + 
"org.zm", + "sch.zm", + "zw", + "ac.zw", + "co.zw", + "gov.zw", + "mil.zw", + "org.zw", + "aaa", + "aarp", + "abarth", + "abb", + "abbott", + "abbvie", + "abc", + "able", + "abogado", + "abudhabi", + "academy", + "accenture", + "accountant", + "accountants", + "aco", + "active", + "actor", + "adac", + "ads", + "adult", + "aeg", + "aetna", + "afamilycompany", + "afl", + "africa", + "agakhan", + "agency", + "aig", + "aigo", + "airbus", + "airforce", + "airtel", + "akdn", + "alfaromeo", + "alibaba", + "alipay", + "allfinanz", + "allstate", + "ally", + "alsace", + "alstom", + "americanexpress", + "americanfamily", + "amex", + "amfam", + "amica", + "amsterdam", + "analytics", + "android", + "anquan", + "anz", + "aol", + "apartments", + "app", + "apple", + "aquarelle", + "arab", + "aramco", + "archi", + "army", + "art", + "arte", + "asda", + "associates", + "athleta", + "attorney", + "auction", + "audi", + "audible", + "audio", + "auspost", + "author", + "auto", + "autos", + "avianca", + "aws", + "axa", + "azure", + "baby", + "baidu", + "banamex", + "bananarepublic", + "band", + "bank", + "bar", + "barcelona", + "barclaycard", + "barclays", + "barefoot", + "bargains", + "baseball", + "basketball", + "bauhaus", + "bayern", + "bbc", + "bbt", + "bbva", + "bcg", + "bcn", + "beats", + "beauty", + "beer", + "bentley", + "berlin", + "best", + "bestbuy", + "bet", + "bharti", + "bible", + "bid", + "bike", + "bing", + "bingo", + "bio", + "black", + "blackfriday", + "blanco", + "blockbuster", + "blog", + "bloomberg", + "blue", + "bms", + "bmw", + "bnl", + "bnpparibas", + "boats", + "boehringer", + "bofa", + "bom", + "bond", + "boo", + "book", + "booking", + "boots", + "bosch", + "bostik", + "boston", + "bot", + "boutique", + "box", + "bradesco", + "bridgestone", + "broadway", + "broker", + "brother", + "brussels", + "budapest", + "bugatti", + "build", + "builders", + "business", + "buy", + "buzz", + "bzh", + "cab", + "cafe", + "cal", + "call", + "calvinklein", + "cam", + "camera", + "camp", + "cancerresearch", + "canon", + "capetown", + "capital", + "capitalone", + "car", + "caravan", + "cards", + "care", + "career", + "careers", + "cars", + "cartier", + "casa", + "case", + "caseih", + "cash", + "casino", + "catering", + "catholic", + "cba", + "cbn", + "cbre", + "cbs", + "ceb", + "center", + "ceo", + "cern", + "cfa", + "cfd", + "chanel", + "channel", + "chase", + "chat", + "cheap", + "chintai", + "christmas", + "chrome", + "chrysler", + "church", + "cipriani", + "circle", + "cisco", + "citadel", + "citi", + "citic", + "city", + "cityeats", + "claims", + "cleaning", + "click", + "clinic", + "clinique", + "clothing", + "cloud", + "club", + "clubmed", + "coach", + "codes", + "coffee", + "college", + "cologne", + "comcast", + "commbank", + "community", + "company", + "compare", + "computer", + "comsec", + "condos", + "construction", + "consulting", + "contact", + "contractors", + "cooking", + "cookingchannel", + "cool", + "corsica", + "country", + "coupon", + "coupons", + "courses", + "credit", + "creditcard", + "creditunion", + "cricket", + "crown", + "crs", + "cruise", + "cruises", + "csc", + "cuisinella", + "cymru", + "cyou", + "dabur", + "dad", + "dance", + "data", + "date", + "dating", + "datsun", + "day", + "dclk", + "dds", + "deal", + "dealer", + "deals", + "degree", + "delivery", + "dell", + "deloitte", + "delta", + "democrat", + "dental", + "dentist", + "desi", + "design", + "dev", + "dhl", + "diamonds", + "diet", + "digital", + "direct", + "directory", + "discount", + "discover", + "dish", + "diy", + 
"dnp", + "docs", + "doctor", + "dodge", + "dog", + "doha", + "domains", + "dot", + "download", + "drive", + "dtv", + "dubai", + "duck", + "dunlop", + "duns", + "dupont", + "durban", + "dvag", + "dvr", + "earth", + "eat", + "eco", + "edeka", + "education", + "email", + "emerck", + "energy", + "engineer", + "engineering", + "enterprises", + "epost", + "epson", + "equipment", + "ericsson", + "erni", + "esq", + "estate", + "esurance", + "etisalat", + "eurovision", + "eus", + "events", + "everbank", + "exchange", + "expert", + "exposed", + "express", + "extraspace", + "fage", + "fail", + "fairwinds", + "faith", + "family", + "fan", + "fans", + "farm", + "farmers", + "fashion", + "fast", + "fedex", + "feedback", + "ferrari", + "ferrero", + "fiat", + "fidelity", + "fido", + "film", + "final", + "finance", + "financial", + "fire", + "firestone", + "firmdale", + "fish", + "fishing", + "fit", + "fitness", + "flickr", + "flights", + "flir", + "florist", + "flowers", + "fly", + "foo", + "food", + "foodnetwork", + "football", + "ford", + "forex", + "forsale", + "forum", + "foundation", + "fox", + "free", + "fresenius", + "frl", + "frogans", + "frontdoor", + "frontier", + "ftr", + "fujitsu", + "fujixerox", + "fun", + "fund", + "furniture", + "futbol", + "fyi", + "gal", + "gallery", + "gallo", + "gallup", + "game", + "games", + "gap", + "garden", + "gbiz", + "gdn", + "gea", + "gent", + "genting", + "george", + "ggee", + "gift", + "gifts", + "gives", + "giving", + "glade", + "glass", + "gle", + "global", + "globo", + "gmail", + "gmbh", + "gmo", + "gmx", + "godaddy", + "gold", + "goldpoint", + "golf", + "goo", + "goodhands", + "goodyear", + "goog", + "google", + "gop", + "got", + "grainger", + "graphics", + "gratis", + "green", + "gripe", + "grocery", + "group", + "guardian", + "gucci", + "guge", + "guide", + "guitars", + "guru", + "hair", + "hamburg", + "hangout", + "haus", + "hbo", + "hdfc", + "hdfcbank", + "health", + "healthcare", + "help", + "helsinki", + "here", + "hermes", + "hgtv", + "hiphop", + "hisamitsu", + "hitachi", + "hiv", + "hkt", + "hockey", + "holdings", + "holiday", + "homedepot", + "homegoods", + "homes", + "homesense", + "honda", + "honeywell", + "horse", + "hospital", + "host", + "hosting", + "hot", + "hoteles", + "hotels", + "hotmail", + "house", + "how", + "hsbc", + "hughes", + "hyatt", + "hyundai", + "ibm", + "icbc", + "ice", + "icu", + "ieee", + "ifm", + "ikano", + "imamat", + "imdb", + "immo", + "immobilien", + "industries", + "infiniti", + "ing", + "ink", + "institute", + "insurance", + "insure", + "intel", + "international", + "intuit", + "investments", + "ipiranga", + "irish", + "iselect", + "ismaili", + "ist", + "istanbul", + "itau", + "itv", + "iveco", + "iwc", + "jaguar", + "java", + "jcb", + "jcp", + "jeep", + "jetzt", + "jewelry", + "jio", + "jlc", + "jll", + "jmp", + "jnj", + "joburg", + "jot", + "joy", + "jpmorgan", + "jprs", + "juegos", + "juniper", + "kaufen", + "kddi", + "kerryhotels", + "kerrylogistics", + "kerryproperties", + "kfh", + "kia", + "kim", + "kinder", + "kindle", + "kitchen", + "kiwi", + "koeln", + "komatsu", + "kosher", + "kpmg", + "kpn", + "krd", + "kred", + "kuokgroup", + "kyoto", + "lacaixa", + "ladbrokes", + "lamborghini", + "lamer", + "lancaster", + "lancia", + "lancome", + "land", + "landrover", + "lanxess", + "lasalle", + "lat", + "latino", + "latrobe", + "law", + "lawyer", + "lds", + "lease", + "leclerc", + "lefrak", + "legal", + "lego", + "lexus", + "lgbt", + "liaison", + "lidl", + "life", + "lifeinsurance", + "lifestyle", + "lighting", + 
"like", + "lilly", + "limited", + "limo", + "lincoln", + "linde", + "link", + "lipsy", + "live", + "living", + "lixil", + "loan", + "loans", + "locker", + "locus", + "loft", + "lol", + "london", + "lotte", + "lotto", + "love", + "lpl", + "lplfinancial", + "ltd", + "ltda", + "lundbeck", + "lupin", + "luxe", + "luxury", + "macys", + "madrid", + "maif", + "maison", + "makeup", + "man", + "management", + "mango", + "map", + "market", + "marketing", + "markets", + "marriott", + "marshalls", + "maserati", + "mattel", + "mba", + "mckinsey", + "med", + "media", + "meet", + "melbourne", + "meme", + "memorial", + "men", + "menu", + "meo", + "merckmsd", + "metlife", + "miami", + "microsoft", + "mini", + "mint", + "mit", + "mitsubishi", + "mlb", + "mls", + "mma", + "mobile", + "mobily", + "moda", + "moe", + "moi", + "mom", + "monash", + "money", + "monster", + "mopar", + "mormon", + "mortgage", + "moscow", + "moto", + "motorcycles", + "mov", + "movie", + "movistar", + "msd", + "mtn", + "mtpc", + "mtr", + "mutual", + "nab", + "nadex", + "nagoya", + "nationwide", + "natura", + "navy", + "nba", + "nec", + "netbank", + "netflix", + "network", + "neustar", + "new", + "newholland", + "news", + "next", + "nextdirect", + "nexus", + "nfl", + "ngo", + "nhk", + "nico", + "nike", + "nikon", + "ninja", + "nissan", + "nissay", + "nokia", + "northwesternmutual", + "norton", + "now", + "nowruz", + "nowtv", + "nra", + "nrw", + "ntt", + "nyc", + "obi", + "observer", + "off", + "office", + "okinawa", + "olayan", + "olayangroup", + "oldnavy", + "ollo", + "omega", + "one", + "ong", + "onl", + "online", + "onyourside", + "ooo", + "open", + "oracle", + "orange", + "organic", + "origins", + "osaka", + "otsuka", + "ott", + "ovh", + "page", + "panasonic", + "panerai", + "paris", + "pars", + "partners", + "parts", + "party", + "passagens", + "pay", + "pccw", + "pet", + "pfizer", + "pharmacy", + "phd", + "philips", + "phone", + "photo", + "photography", + "photos", + "physio", + "piaget", + "pics", + "pictet", + "pictures", + "pid", + "pin", + "ping", + "pink", + "pioneer", + "pizza", + "place", + "play", + "playstation", + "plumbing", + "plus", + "pnc", + "pohl", + "poker", + "politie", + "porn", + "pramerica", + "praxi", + "press", + "prime", + "prod", + "productions", + "prof", + "progressive", + "promo", + "properties", + "property", + "protection", + "pru", + "prudential", + "pub", + "pwc", + "qpon", + "quebec", + "quest", + "qvc", + "racing", + "radio", + "raid", + "read", + "realestate", + "realtor", + "realty", + "recipes", + "red", + "redstone", + "redumbrella", + "rehab", + "reise", + "reisen", + "reit", + "reliance", + "ren", + "rent", + "rentals", + "repair", + "report", + "republican", + "rest", + "restaurant", + "review", + "reviews", + "rexroth", + "rich", + "richardli", + "ricoh", + "rightathome", + "ril", + "rio", + "rip", + "rmit", + "rocher", + "rocks", + "rodeo", + "rogers", + "room", + "rsvp", + "rugby", + "ruhr", + "run", + "rwe", + "ryukyu", + "saarland", + "safe", + "safety", + "sakura", + "sale", + "salon", + "samsclub", + "samsung", + "sandvik", + "sandvikcoromant", + "sanofi", + "sap", + "sapo", + "sarl", + "sas", + "save", + "saxo", + "sbi", + "sbs", + "sca", + "scb", + "schaeffler", + "schmidt", + "scholarships", + "school", + "schule", + "schwarz", + "science", + "scjohnson", + "scor", + "scot", + "search", + "seat", + "secure", + "security", + "seek", + "select", + "sener", + "services", + "ses", + "seven", + "sew", + "sex", + "sexy", + "sfr", + "shangrila", + "sharp", + "shaw", + "shell", + 
"shia", + "shiksha", + "shoes", + "shop", + "shopping", + "shouji", + "show", + "showtime", + "shriram", + "silk", + "sina", + "singles", + "site", + "ski", + "skin", + "sky", + "skype", + "sling", + "smart", + "smile", + "sncf", + "soccer", + "social", + "softbank", + "software", + "sohu", + "solar", + "solutions", + "song", + "sony", + "soy", + "space", + "spiegel", + "spot", + "spreadbetting", + "srl", + "srt", + "stada", + "staples", + "star", + "starhub", + "statebank", + "statefarm", + "statoil", + "stc", + "stcgroup", + "stockholm", + "storage", + "store", + "stream", + "studio", + "study", + "style", + "sucks", + "supplies", + "supply", + "support", + "surf", + "surgery", + "suzuki", + "swatch", + "swiftcover", + "swiss", + "sydney", + "symantec", + "systems", + "tab", + "taipei", + "talk", + "taobao", + "target", + "tatamotors", + "tatar", + "tattoo", + "tax", + "taxi", + "tci", + "tdk", + "team", + "tech", + "technology", + "telecity", + "telefonica", + "temasek", + "tennis", + "teva", + "thd", + "theater", + "theatre", + "tiaa", + "tickets", + "tienda", + "tiffany", + "tips", + "tires", + "tirol", + "tjmaxx", + "tjx", + "tkmaxx", + "tmall", + "today", + "tokyo", + "tools", + "top", + "toray", + "toshiba", + "total", + "tours", + "town", + "toyota", + "toys", + "trade", + "trading", + "training", + "travelchannel", + "travelers", + "travelersinsurance", + "trust", + "trv", + "tube", + "tui", + "tunes", + "tushu", + "tvs", + "ubank", + "ubs", + "uconnect", + "unicom", + "university", + "uno", + "uol", + "ups", + "vacations", + "vana", + "vanguard", + "vegas", + "ventures", + "verisign", + "versicherung", + "vet", + "viajes", + "video", + "vig", + "viking", + "villas", + "vin", + "vip", + "virgin", + "visa", + "vision", + "vista", + "vistaprint", + "viva", + "vivo", + "vlaanderen", + "vodka", + "volkswagen", + "volvo", + "vote", + "voting", + "voto", + "voyage", + "vuelos", + "wales", + "walmart", + "walter", + "wang", + "wanggou", + "warman", + "watch", + "watches", + "weather", + "weatherchannel", + "webcam", + "weber", + "website", + "wed", + "wedding", + "weibo", + "weir", + "whoswho", + "wien", + "wiki", + "williamhill", + "win", + "windows", + "wine", + "winners", + "wme", + "wolterskluwer", + "woodside", + "work", + "works", + "world", + "wow", + "wtc", + "wtf", + "xbox", + "xerox", + "xfinity", + "xihuan", + "xin", + "xn--11b4c3d", + "xn--1ck2e1b", + "xn--1qqw23a", + "xn--30rr7y", + "xn--3bst00m", + "xn--3ds443g", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", + "xn--45q11c", + "xn--4gbrim", + "xn--55qw42g", + "xn--55qx5d", + "xn--5su34j936bgsg", + "xn--5tzm5g", + "xn--6frz82g", + "xn--6qq986b3xl", + "xn--80adxhks", + "xn--80aqecdr1a", + "xn--80asehdb", + "xn--80aswg", + "xn--8y0a063a", + "xn--9dbq2a", + "xn--9et52u", + "xn--9krt00a", + "xn--b4w605ferd", + "xn--bck1b9a5dre4c", + "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", + "xn--cg4bki", + "xn--czr694b", + "xn--czrs0t", + "xn--czru2d", + "xn--d1acj3b", + "xn--eckvdtc9d", + "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", + "xn--fiq228c5hs", + "xn--fiq64b", + "xn--fjq720a", + "xn--flw351e", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", + "xn--gk3at1e", + "xn--hxt814e", + "xn--i1b6b1a6a2e", + "xn--imr513n", + "xn--io0a7i", + "xn--j1aef", + "xn--jlq61u9w7b", + "xn--jvr189m", + "xn--kcrx77d1x4a", + "xn--kpu716f", + "xn--kput3i", + "xn--mgba3a3ejt", + "xn--mgba7c0bbn0a", + "xn--mgbaakc7dvf", + "xn--mgbab2bd", + "xn--mgbb9fbpob", + "xn--mgbca7dzdo", + "xn--mgbi4ecexp", + "xn--mgbt3dhd", + 
"xn--mk1bu44c", + "xn--mxtq1m", + "xn--ngbc5azd", + "xn--ngbe9e0a", + "xn--ngbrx", + "xn--nqv7f", + "xn--nqv7fs00ema", + "xn--nyqy26a", + "xn--p1acf", + "xn--pbt977c", + "xn--pssy2u", + "xn--q9jyb4c", + "xn--qcka1pmc", + "xn--rhqv96g", + "xn--rovu88b", + "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", + "xn--tiq49xqyj", + "xn--unup4y", + "xn--vermgensberater-ctb", + "xn--vermgensberatung-pwb", + "xn--vhquv", + "xn--vuq861b", + "xn--w4r85el8fhu5dnra", + "xn--w4rs40l", + "xn--xhq521b", + "xn--zfr164b", + "xperia", + "xyz", + "yachts", + "yahoo", + "yamaxun", + "yandex", + "yodobashi", + "yoga", + "yokohama", + "you", + "youtube", + "yun", + "zappos", + "zara", + "zero", + "zip", + "zippo", + "zone", + "zuerich", + "cc.ua", + "inf.ua", + "ltd.ua", + "1password.ca", + "1password.com", + "1password.eu", + "beep.pl", + "*.compute.estate", + "*.alces.network", + "alwaysdata.net", + "cloudfront.net", + "*.compute.amazonaws.com", + "*.compute-1.amazonaws.com", + "*.compute.amazonaws.com.cn", + "us-east-1.amazonaws.com", + "cn-north-1.eb.amazonaws.com.cn", + "elasticbeanstalk.com", + "ap-northeast-1.elasticbeanstalk.com", + "ap-northeast-2.elasticbeanstalk.com", + "ap-south-1.elasticbeanstalk.com", + "ap-southeast-1.elasticbeanstalk.com", + "ap-southeast-2.elasticbeanstalk.com", + "ca-central-1.elasticbeanstalk.com", + "eu-central-1.elasticbeanstalk.com", + "eu-west-1.elasticbeanstalk.com", + "eu-west-2.elasticbeanstalk.com", + "eu-west-3.elasticbeanstalk.com", + "sa-east-1.elasticbeanstalk.com", + "us-east-1.elasticbeanstalk.com", + "us-east-2.elasticbeanstalk.com", + "us-gov-west-1.elasticbeanstalk.com", + "us-west-1.elasticbeanstalk.com", + "us-west-2.elasticbeanstalk.com", + "*.elb.amazonaws.com", + "*.elb.amazonaws.com.cn", + "s3.amazonaws.com", + "s3-ap-northeast-1.amazonaws.com", + "s3-ap-northeast-2.amazonaws.com", + "s3-ap-south-1.amazonaws.com", + "s3-ap-southeast-1.amazonaws.com", + "s3-ap-southeast-2.amazonaws.com", + "s3-ca-central-1.amazonaws.com", + "s3-eu-central-1.amazonaws.com", + "s3-eu-west-1.amazonaws.com", + "s3-eu-west-2.amazonaws.com", + "s3-eu-west-3.amazonaws.com", + "s3-external-1.amazonaws.com", + "s3-fips-us-gov-west-1.amazonaws.com", + "s3-sa-east-1.amazonaws.com", + "s3-us-gov-west-1.amazonaws.com", + "s3-us-east-2.amazonaws.com", + "s3-us-west-1.amazonaws.com", + "s3-us-west-2.amazonaws.com", + "s3.ap-northeast-2.amazonaws.com", + "s3.ap-south-1.amazonaws.com", + "s3.cn-north-1.amazonaws.com.cn", + "s3.ca-central-1.amazonaws.com", + "s3.eu-central-1.amazonaws.com", + "s3.eu-west-2.amazonaws.com", + "s3.eu-west-3.amazonaws.com", + "s3.us-east-2.amazonaws.com", + "s3.dualstack.ap-northeast-1.amazonaws.com", + "s3.dualstack.ap-northeast-2.amazonaws.com", + "s3.dualstack.ap-south-1.amazonaws.com", + "s3.dualstack.ap-southeast-1.amazonaws.com", + "s3.dualstack.ap-southeast-2.amazonaws.com", + "s3.dualstack.ca-central-1.amazonaws.com", + "s3.dualstack.eu-central-1.amazonaws.com", + "s3.dualstack.eu-west-1.amazonaws.com", + "s3.dualstack.eu-west-2.amazonaws.com", + "s3.dualstack.eu-west-3.amazonaws.com", + "s3.dualstack.sa-east-1.amazonaws.com", + "s3.dualstack.us-east-1.amazonaws.com", + "s3.dualstack.us-east-2.amazonaws.com", + "s3-website-us-east-1.amazonaws.com", + "s3-website-us-west-1.amazonaws.com", + "s3-website-us-west-2.amazonaws.com", + "s3-website-ap-northeast-1.amazonaws.com", + "s3-website-ap-southeast-1.amazonaws.com", + "s3-website-ap-southeast-2.amazonaws.com", + "s3-website-eu-west-1.amazonaws.com", + "s3-website-sa-east-1.amazonaws.com", + 
"s3-website.ap-northeast-2.amazonaws.com", + "s3-website.ap-south-1.amazonaws.com", + "s3-website.ca-central-1.amazonaws.com", + "s3-website.eu-central-1.amazonaws.com", + "s3-website.eu-west-2.amazonaws.com", + "s3-website.eu-west-3.amazonaws.com", + "s3-website.us-east-2.amazonaws.com", + "t3l3p0rt.net", + "tele.amune.org", + "on-aptible.com", + "user.party.eus", + "pimienta.org", + "poivron.org", + "potager.org", + "sweetpepper.org", + "myasustor.com", + "myfritz.net", + "*.awdev.ca", + "*.advisor.ws", + "backplaneapp.io", + "betainabox.com", + "bnr.la", + "boomla.net", + "boxfuse.io", + "square7.ch", + "bplaced.com", + "bplaced.de", + "square7.de", + "bplaced.net", + "square7.net", + "browsersafetymark.io", + "mycd.eu", + "ae.org", + "ar.com", + "br.com", + "cn.com", + "com.de", + "com.se", + "de.com", + "eu.com", + "gb.com", + "gb.net", + "hu.com", + "hu.net", + "jp.net", + "jpn.com", + "kr.com", + "mex.com", + "no.com", + "qc.com", + "ru.com", + "sa.com", + "se.com", + "se.net", + "uk.com", + "uk.net", + "us.com", + "uy.com", + "za.bz", + "za.com", + "africa.com", + "gr.com", + "in.net", + "us.org", + "co.com", + "c.la", + "certmgr.org", + "xenapponazure.com", + "virtueeldomein.nl", + "c66.me", + "jdevcloud.com", + "wpdevcloud.com", + "cloudaccess.host", + "freesite.host", + "cloudaccess.net", + "cloudcontrolled.com", + "cloudcontrolapp.com", + "co.ca", + "co.cz", + "c.cdn77.org", + "cdn77-ssl.net", + "r.cdn77.net", + "rsc.cdn77.org", + "ssl.origin.cdn77-secure.org", + "cloudns.asia", + "cloudns.biz", + "cloudns.club", + "cloudns.cc", + "cloudns.eu", + "cloudns.in", + "cloudns.info", + "cloudns.org", + "cloudns.pro", + "cloudns.pw", + "cloudns.us", + "co.nl", + "co.no", + "webhosting.be", + "hosting-cluster.nl", + "dyn.cosidns.de", + "dynamisches-dns.de", + "dnsupdater.de", + "internet-dns.de", + "l-o-g-i-n.de", + "dynamic-dns.info", + "feste-ip.net", + "knx-server.net", + "static-access.net", + "realm.cz", + "*.cryptonomic.net", + "cupcake.is", + "cyon.link", + "cyon.site", + "daplie.me", + "localhost.daplie.me", + "biz.dk", + "co.dk", + "firm.dk", + "reg.dk", + "store.dk", + "debian.net", + "dedyn.io", + "dnshome.de", + "drayddns.com", + "dreamhosters.com", + "mydrobo.com", + "drud.io", + "drud.us", + "duckdns.org", + "dy.fi", + "tunk.org", + "dyndns-at-home.com", + "dyndns-at-work.com", + "dyndns-blog.com", + "dyndns-free.com", + "dyndns-home.com", + "dyndns-ip.com", + "dyndns-mail.com", + "dyndns-office.com", + "dyndns-pics.com", + "dyndns-remote.com", + "dyndns-server.com", + "dyndns-web.com", + "dyndns-wiki.com", + "dyndns-work.com", + "dyndns.biz", + "dyndns.info", + "dyndns.org", + "dyndns.tv", + "at-band-camp.net", + "ath.cx", + "barrel-of-knowledge.info", + "barrell-of-knowledge.info", + "better-than.tv", + "blogdns.com", + "blogdns.net", + "blogdns.org", + "blogsite.org", + "boldlygoingnowhere.org", + "broke-it.net", + "buyshouses.net", + "cechire.com", + "dnsalias.com", + "dnsalias.net", + "dnsalias.org", + "dnsdojo.com", + "dnsdojo.net", + "dnsdojo.org", + "does-it.net", + "doesntexist.com", + "doesntexist.org", + "dontexist.com", + "dontexist.net", + "dontexist.org", + "doomdns.com", + "doomdns.org", + "dvrdns.org", + "dyn-o-saur.com", + "dynalias.com", + "dynalias.net", + "dynalias.org", + "dynathome.net", + "dyndns.ws", + "endofinternet.net", + "endofinternet.org", + "endoftheinternet.org", + "est-a-la-maison.com", + "est-a-la-masion.com", + "est-le-patron.com", + "est-mon-blogueur.com", + "for-better.biz", + "for-more.biz", + "for-our.info", + "for-some.biz", + 
"for-the.biz", + "forgot.her.name", + "forgot.his.name", + "from-ak.com", + "from-al.com", + "from-ar.com", + "from-az.net", + "from-ca.com", + "from-co.net", + "from-ct.com", + "from-dc.com", + "from-de.com", + "from-fl.com", + "from-ga.com", + "from-hi.com", + "from-ia.com", + "from-id.com", + "from-il.com", + "from-in.com", + "from-ks.com", + "from-ky.com", + "from-la.net", + "from-ma.com", + "from-md.com", + "from-me.org", + "from-mi.com", + "from-mn.com", + "from-mo.com", + "from-ms.com", + "from-mt.com", + "from-nc.com", + "from-nd.com", + "from-ne.com", + "from-nh.com", + "from-nj.com", + "from-nm.com", + "from-nv.com", + "from-ny.net", + "from-oh.com", + "from-ok.com", + "from-or.com", + "from-pa.com", + "from-pr.com", + "from-ri.com", + "from-sc.com", + "from-sd.com", + "from-tn.com", + "from-tx.com", + "from-ut.com", + "from-va.com", + "from-vt.com", + "from-wa.com", + "from-wi.com", + "from-wv.com", + "from-wy.com", + "ftpaccess.cc", + "fuettertdasnetz.de", + "game-host.org", + "game-server.cc", + "getmyip.com", + "gets-it.net", + "go.dyndns.org", + "gotdns.com", + "gotdns.org", + "groks-the.info", + "groks-this.info", + "ham-radio-op.net", + "here-for-more.info", + "hobby-site.com", + "hobby-site.org", + "home.dyndns.org", + "homedns.org", + "homeftp.net", + "homeftp.org", + "homeip.net", + "homelinux.com", + "homelinux.net", + "homelinux.org", + "homeunix.com", + "homeunix.net", + "homeunix.org", + "iamallama.com", + "in-the-band.net", + "is-a-anarchist.com", + "is-a-blogger.com", + "is-a-bookkeeper.com", + "is-a-bruinsfan.org", + "is-a-bulls-fan.com", + "is-a-candidate.org", + "is-a-caterer.com", + "is-a-celticsfan.org", + "is-a-chef.com", + "is-a-chef.net", + "is-a-chef.org", + "is-a-conservative.com", + "is-a-cpa.com", + "is-a-cubicle-slave.com", + "is-a-democrat.com", + "is-a-designer.com", + "is-a-doctor.com", + "is-a-financialadvisor.com", + "is-a-geek.com", + "is-a-geek.net", + "is-a-geek.org", + "is-a-green.com", + "is-a-guru.com", + "is-a-hard-worker.com", + "is-a-hunter.com", + "is-a-knight.org", + "is-a-landscaper.com", + "is-a-lawyer.com", + "is-a-liberal.com", + "is-a-libertarian.com", + "is-a-linux-user.org", + "is-a-llama.com", + "is-a-musician.com", + "is-a-nascarfan.com", + "is-a-nurse.com", + "is-a-painter.com", + "is-a-patsfan.org", + "is-a-personaltrainer.com", + "is-a-photographer.com", + "is-a-player.com", + "is-a-republican.com", + "is-a-rockstar.com", + "is-a-socialist.com", + "is-a-soxfan.org", + "is-a-student.com", + "is-a-teacher.com", + "is-a-techie.com", + "is-a-therapist.com", + "is-an-accountant.com", + "is-an-actor.com", + "is-an-actress.com", + "is-an-anarchist.com", + "is-an-artist.com", + "is-an-engineer.com", + "is-an-entertainer.com", + "is-by.us", + "is-certified.com", + "is-found.org", + "is-gone.com", + "is-into-anime.com", + "is-into-cars.com", + "is-into-cartoons.com", + "is-into-games.com", + "is-leet.com", + "is-lost.org", + "is-not-certified.com", + "is-saved.org", + "is-slick.com", + "is-uberleet.com", + "is-very-bad.org", + "is-very-evil.org", + "is-very-good.org", + "is-very-nice.org", + "is-very-sweet.org", + "is-with-theband.com", + "isa-geek.com", + "isa-geek.net", + "isa-geek.org", + "isa-hockeynut.com", + "issmarterthanyou.com", + "isteingeek.de", + "istmein.de", + "kicks-ass.net", + "kicks-ass.org", + "knowsitall.info", + "land-4-sale.us", + "lebtimnetz.de", + "leitungsen.de", + "likes-pie.com", + "likescandy.com", + "merseine.nu", + "mine.nu", + "misconfused.org", + "mypets.ws", + "myphotos.cc", + "neat-url.com", + 
"office-on-the.net", + "on-the-web.tv", + "podzone.net", + "podzone.org", + "readmyblog.org", + "saves-the-whales.com", + "scrapper-site.net", + "scrapping.cc", + "selfip.biz", + "selfip.com", + "selfip.info", + "selfip.net", + "selfip.org", + "sells-for-less.com", + "sells-for-u.com", + "sells-it.net", + "sellsyourhome.org", + "servebbs.com", + "servebbs.net", + "servebbs.org", + "serveftp.net", + "serveftp.org", + "servegame.org", + "shacknet.nu", + "simple-url.com", + "space-to-rent.com", + "stuff-4-sale.org", + "stuff-4-sale.us", + "teaches-yoga.com", + "thruhere.net", + "traeumtgerade.de", + "webhop.biz", + "webhop.info", + "webhop.net", + "webhop.org", + "worse-than.tv", + "writesthisblog.com", + "ddnss.de", + "dyn.ddnss.de", + "dyndns.ddnss.de", + "dyndns1.de", + "dyn-ip24.de", + "home-webserver.de", + "dyn.home-webserver.de", + "myhome-server.de", + "ddnss.org", + "definima.net", + "definima.io", + "ddnsfree.com", + "ddnsgeek.com", + "giize.com", + "gleeze.com", + "kozow.com", + "loseyourip.com", + "ooguy.com", + "theworkpc.com", + "casacam.net", + "dynu.net", + "accesscam.org", + "camdvr.org", + "freeddns.org", + "mywire.org", + "webredirect.org", + "myddns.rocks", + "blogsite.xyz", + "dynv6.net", + "e4.cz", + "mytuleap.com", + "enonic.io", + "customer.enonic.io", + "eu.org", + "al.eu.org", + "asso.eu.org", + "at.eu.org", + "au.eu.org", + "be.eu.org", + "bg.eu.org", + "ca.eu.org", + "cd.eu.org", + "ch.eu.org", + "cn.eu.org", + "cy.eu.org", + "cz.eu.org", + "de.eu.org", + "dk.eu.org", + "edu.eu.org", + "ee.eu.org", + "es.eu.org", + "fi.eu.org", + "fr.eu.org", + "gr.eu.org", + "hr.eu.org", + "hu.eu.org", + "ie.eu.org", + "il.eu.org", + "in.eu.org", + "int.eu.org", + "is.eu.org", + "it.eu.org", + "jp.eu.org", + "kr.eu.org", + "lt.eu.org", + "lu.eu.org", + "lv.eu.org", + "mc.eu.org", + "me.eu.org", + "mk.eu.org", + "mt.eu.org", + "my.eu.org", + "net.eu.org", + "ng.eu.org", + "nl.eu.org", + "no.eu.org", + "nz.eu.org", + "paris.eu.org", + "pl.eu.org", + "pt.eu.org", + "q-a.eu.org", + "ro.eu.org", + "ru.eu.org", + "se.eu.org", + "si.eu.org", + "sk.eu.org", + "tr.eu.org", + "uk.eu.org", + "us.eu.org", + "eu-1.evennode.com", + "eu-2.evennode.com", + "eu-3.evennode.com", + "eu-4.evennode.com", + "us-1.evennode.com", + "us-2.evennode.com", + "us-3.evennode.com", + "us-4.evennode.com", + "twmail.cc", + "twmail.net", + "twmail.org", + "mymailer.com.tw", + "url.tw", + "apps.fbsbx.com", + "ru.net", + "adygeya.ru", + "bashkiria.ru", + "bir.ru", + "cbg.ru", + "com.ru", + "dagestan.ru", + "grozny.ru", + "kalmykia.ru", + "kustanai.ru", + "marine.ru", + "mordovia.ru", + "msk.ru", + "mytis.ru", + "nalchik.ru", + "nov.ru", + "pyatigorsk.ru", + "spb.ru", + "vladikavkaz.ru", + "vladimir.ru", + "abkhazia.su", + "adygeya.su", + "aktyubinsk.su", + "arkhangelsk.su", + "armenia.su", + "ashgabad.su", + "azerbaijan.su", + "balashov.su", + "bashkiria.su", + "bryansk.su", + "bukhara.su", + "chimkent.su", + "dagestan.su", + "east-kazakhstan.su", + "exnet.su", + "georgia.su", + "grozny.su", + "ivanovo.su", + "jambyl.su", + "kalmykia.su", + "kaluga.su", + "karacol.su", + "karaganda.su", + "karelia.su", + "khakassia.su", + "krasnodar.su", + "kurgan.su", + "kustanai.su", + "lenug.su", + "mangyshlak.su", + "mordovia.su", + "msk.su", + "murmansk.su", + "nalchik.su", + "navoi.su", + "north-kazakhstan.su", + "nov.su", + "obninsk.su", + "penza.su", + "pokrovsk.su", + "sochi.su", + "spb.su", + "tashkent.su", + "termez.su", + "togliatti.su", + "troitsk.su", + "tselinograd.su", + "tula.su", + "tuva.su", + "vladikavkaz.su", + 
"vladimir.su", + "vologda.su", + "channelsdvr.net", + "fastlylb.net", + "map.fastlylb.net", + "freetls.fastly.net", + "map.fastly.net", + "a.prod.fastly.net", + "global.prod.fastly.net", + "a.ssl.fastly.net", + "b.ssl.fastly.net", + "global.ssl.fastly.net", + "fhapp.xyz", + "fedorainfracloud.org", + "fedorapeople.org", + "cloud.fedoraproject.org", + "app.os.fedoraproject.org", + "app.os.stg.fedoraproject.org", + "filegear.me", + "firebaseapp.com", + "flynnhub.com", + "flynnhosting.net", + "freebox-os.com", + "freeboxos.com", + "fbx-os.fr", + "fbxos.fr", + "freebox-os.fr", + "freeboxos.fr", + "*.futurecms.at", + "futurehosting.at", + "futuremailing.at", + "*.ex.ortsinfo.at", + "*.kunden.ortsinfo.at", + "*.statics.cloud", + "service.gov.uk", + "github.io", + "githubusercontent.com", + "gitlab.io", + "homeoffice.gov.uk", + "ro.im", + "shop.ro", + "goip.de", + "*.0emm.com", + "appspot.com", + "blogspot.ae", + "blogspot.al", + "blogspot.am", + "blogspot.ba", + "blogspot.be", + "blogspot.bg", + "blogspot.bj", + "blogspot.ca", + "blogspot.cf", + "blogspot.ch", + "blogspot.cl", + "blogspot.co.at", + "blogspot.co.id", + "blogspot.co.il", + "blogspot.co.ke", + "blogspot.co.nz", + "blogspot.co.uk", + "blogspot.co.za", + "blogspot.com", + "blogspot.com.ar", + "blogspot.com.au", + "blogspot.com.br", + "blogspot.com.by", + "blogspot.com.co", + "blogspot.com.cy", + "blogspot.com.ee", + "blogspot.com.eg", + "blogspot.com.es", + "blogspot.com.mt", + "blogspot.com.ng", + "blogspot.com.tr", + "blogspot.com.uy", + "blogspot.cv", + "blogspot.cz", + "blogspot.de", + "blogspot.dk", + "blogspot.fi", + "blogspot.fr", + "blogspot.gr", + "blogspot.hk", + "blogspot.hr", + "blogspot.hu", + "blogspot.ie", + "blogspot.in", + "blogspot.is", + "blogspot.it", + "blogspot.jp", + "blogspot.kr", + "blogspot.li", + "blogspot.lt", + "blogspot.lu", + "blogspot.md", + "blogspot.mk", + "blogspot.mr", + "blogspot.mx", + "blogspot.my", + "blogspot.nl", + "blogspot.no", + "blogspot.pe", + "blogspot.pt", + "blogspot.qa", + "blogspot.re", + "blogspot.ro", + "blogspot.rs", + "blogspot.ru", + "blogspot.se", + "blogspot.sg", + "blogspot.si", + "blogspot.sk", + "blogspot.sn", + "blogspot.td", + "blogspot.tw", + "blogspot.ug", + "blogspot.vn", + "cloudfunctions.net", + "cloud.goog", + "codespot.com", + "googleapis.com", + "googlecode.com", + "pagespeedmobilizer.com", + "publishproxy.com", + "withgoogle.com", + "withyoutube.com", + "hashbang.sh", + "hasura-app.io", + "hepforge.org", + "herokuapp.com", + "herokussl.com", + "moonscale.net", + "iki.fi", + "biz.at", + "info.at", + "info.cx", + "ac.leg.br", + "al.leg.br", + "am.leg.br", + "ap.leg.br", + "ba.leg.br", + "ce.leg.br", + "df.leg.br", + "es.leg.br", + "go.leg.br", + "ma.leg.br", + "mg.leg.br", + "ms.leg.br", + "mt.leg.br", + "pa.leg.br", + "pb.leg.br", + "pe.leg.br", + "pi.leg.br", + "pr.leg.br", + "rj.leg.br", + "rn.leg.br", + "ro.leg.br", + "rr.leg.br", + "rs.leg.br", + "sc.leg.br", + "se.leg.br", + "sp.leg.br", + "to.leg.br", + "pixolino.com", + "ipifony.net", + "*.triton.zone", + "*.cns.joyent.com", + "js.org", + "keymachine.de", + "knightpoint.systems", + "co.krd", + "edu.krd", + "git-repos.de", + "lcube-server.de", + "svn-repos.de", + "we.bs", + "barsy.bg", + "barsyonline.com", + "barsy.de", + "barsy.eu", + "barsy.in", + "barsy.net", + "barsy.online", + "barsy.support", + "*.magentosite.cloud", + "hb.cldmail.ru", + "cloud.metacentrum.cz", + "custom.metacentrum.cz", + "meteorapp.com", + "eu.meteorapp.com", + "co.pl", + "azurewebsites.net", + "azure-mobile.net", + "cloudapp.net", + 
"bmoattachments.org", + "net.ru", + "org.ru", + "pp.ru", + "bitballoon.com", + "netlify.com", + "4u.com", + "ngrok.io", + "nh-serv.co.uk", + "nfshost.com", + "nsupdate.info", + "nerdpol.ovh", + "blogsyte.com", + "brasilia.me", + "cable-modem.org", + "ciscofreak.com", + "collegefan.org", + "couchpotatofries.org", + "damnserver.com", + "ddns.me", + "ditchyourip.com", + "dnsfor.me", + "dnsiskinky.com", + "dvrcam.info", + "dynns.com", + "eating-organic.net", + "fantasyleague.cc", + "geekgalaxy.com", + "golffan.us", + "health-carereform.com", + "homesecuritymac.com", + "homesecuritypc.com", + "hopto.me", + "ilovecollege.info", + "loginto.me", + "mlbfan.org", + "mmafan.biz", + "myactivedirectory.com", + "mydissent.net", + "myeffect.net", + "mymediapc.net", + "mypsx.net", + "mysecuritycamera.com", + "mysecuritycamera.net", + "mysecuritycamera.org", + "net-freaks.com", + "nflfan.org", + "nhlfan.net", + "no-ip.ca", + "no-ip.co.uk", + "no-ip.net", + "noip.us", + "onthewifi.com", + "pgafan.net", + "point2this.com", + "pointto.us", + "privatizehealthinsurance.net", + "quicksytes.com", + "read-books.org", + "securitytactics.com", + "serveexchange.com", + "servehumour.com", + "servep2p.com", + "servesarcasm.com", + "stufftoread.com", + "ufcfan.org", + "unusualperson.com", + "workisboring.com", + "3utilities.com", + "bounceme.net", + "ddns.net", + "ddnsking.com", + "gotdns.ch", + "hopto.org", + "myftp.biz", + "myftp.org", + "myvnc.com", + "no-ip.biz", + "no-ip.info", + "no-ip.org", + "noip.me", + "redirectme.net", + "servebeer.com", + "serveblog.net", + "servecounterstrike.com", + "serveftp.com", + "servegame.com", + "servehalflife.com", + "servehttp.com", + "serveirc.com", + "serveminecraft.net", + "servemp3.com", + "servepics.com", + "servequake.com", + "sytes.net", + "webhop.me", + "zapto.org", + "stage.nodeart.io", + "nodum.co", + "nodum.io", + "nyc.mn", + "nom.ae", + "nom.ai", + "nom.al", + "nym.by", + "nym.bz", + "nom.cl", + "nom.gd", + "nom.gl", + "nym.gr", + "nom.gt", + "nom.hn", + "nom.im", + "nym.kz", + "nym.la", + "nom.li", + "nym.li", + "nym.lt", + "nym.lu", + "nym.me", + "nom.mk", + "nym.mx", + "nom.nu", + "nym.nz", + "nym.pe", + "nym.pt", + "nom.pw", + "nom.qa", + "nom.rs", + "nom.si", + "nym.sk", + "nym.su", + "nym.sx", + "nym.tw", + "nom.ug", + "nom.uy", + "nom.vc", + "nom.vg", + "cya.gg", + "nid.io", + "opencraft.hosting", + "operaunite.com", + "outsystemscloud.com", + "ownprovider.com", + "oy.lc", + "pgfog.com", + "pagefrontapp.com", + "art.pl", + "gliwice.pl", + "krakow.pl", + "poznan.pl", + "wroc.pl", + "zakopane.pl", + "pantheonsite.io", + "gotpantheon.com", + "mypep.link", + "on-web.fr", + "*.platform.sh", + "*.platformsh.site", + "xen.prgmr.com", + "priv.at", + "protonet.io", + "chirurgiens-dentistes-en-france.fr", + "byen.site", + "qa2.com", + "dev-myqnapcloud.com", + "alpha-myqnapcloud.com", + "myqnapcloud.com", + "*.quipelements.com", + "vapor.cloud", + "vaporcloud.io", + "rackmaze.com", + "rackmaze.net", + "rhcloud.com", + "resindevice.io", + "devices.resinstaging.io", + "hzc.io", + "wellbeingzone.eu", + "ptplus.fit", + "wellbeingzone.co.uk", + "sandcats.io", + "logoip.de", + "logoip.com", + "scrysec.com", + "firewall-gateway.com", + "firewall-gateway.de", + "my-gateway.de", + "my-router.de", + "spdns.de", + "spdns.eu", + "firewall-gateway.net", + "my-firewall.org", + "myfirewall.org", + "spdns.org", + "*.s5y.io", + "*.sensiosite.cloud", + "biz.ua", + "co.ua", + "pp.ua", + "shiftedit.io", + "myshopblocks.com", + "1kapp.com", + "appchizi.com", + "applinzi.com", + "sinaapp.com", 
+ "vipsinaapp.com", + "bounty-full.com", + "alpha.bounty-full.com", + "beta.bounty-full.com", + "static.land", + "dev.static.land", + "sites.static.land", + "apps.lair.io", + "*.stolos.io", + "spacekit.io", + "stackspace.space", + "storj.farm", + "temp-dns.com", + "diskstation.me", + "dscloud.biz", + "dscloud.me", + "dscloud.mobi", + "dsmynas.com", + "dsmynas.net", + "dsmynas.org", + "familyds.com", + "familyds.net", + "familyds.org", + "i234.me", + "myds.me", + "synology.me", + "vpnplus.to", + "taifun-dns.de", + "gda.pl", + "gdansk.pl", + "gdynia.pl", + "med.pl", + "sopot.pl", + "cust.dev.thingdust.io", + "cust.disrec.thingdust.io", + "cust.prod.thingdust.io", + "cust.testing.thingdust.io", + "bloxcms.com", + "townnews-staging.com", + "12hp.at", + "2ix.at", + "4lima.at", + "lima-city.at", + "12hp.ch", + "2ix.ch", + "4lima.ch", + "lima-city.ch", + "trafficplex.cloud", + "de.cool", + "12hp.de", + "2ix.de", + "4lima.de", + "lima-city.de", + "1337.pictures", + "clan.rip", + "lima-city.rocks", + "webspace.rocks", + "lima.zone", + "*.transurl.be", + "*.transurl.eu", + "*.transurl.nl", + "tuxfamily.org", + "dd-dns.de", + "diskstation.eu", + "diskstation.org", + "dray-dns.de", + "draydns.de", + "dyn-vpn.de", + "dynvpn.de", + "mein-vigor.de", + "my-vigor.de", + "my-wan.de", + "syno-ds.de", + "synology-diskstation.de", + "synology-ds.de", + "uber.space", + "hk.com", + "hk.org", + "ltd.hk", + "inc.hk", + "lib.de.us", + "router.management", + "v-info.info", + "wedeploy.io", + "wedeploy.me", + "wedeploy.sh", + "remotewd.com", + "wmflabs.org", + "cistron.nl", + "demon.nl", + "xs4all.space", + "yolasite.com", + "ybo.faith", + "yombo.me", + "homelink.one", + "ybo.party", + "ybo.review", + "ybo.science", + "ybo.trade", + "za.net", + "za.org", + "now.sh", +} + +var nodeLabels = [...]string{ + "aaa", + "aarp", + "abarth", + "abb", + "abbott", + "abbvie", + "abc", + "able", + "abogado", + "abudhabi", + "ac", + "academy", + "accenture", + "accountant", + "accountants", + "aco", + "active", + "actor", + "ad", + "adac", + "ads", + "adult", + "ae", + "aeg", + "aero", + "aetna", + "af", + "afamilycompany", + "afl", + "africa", + "ag", + "agakhan", + "agency", + "ai", + "aig", + "aigo", + "airbus", + "airforce", + "airtel", + "akdn", + "al", + "alfaromeo", + "alibaba", + "alipay", + "allfinanz", + "allstate", + "ally", + "alsace", + "alstom", + "am", + "americanexpress", + "americanfamily", + "amex", + "amfam", + "amica", + "amsterdam", + "analytics", + "android", + "anquan", + "anz", + "ao", + "aol", + "apartments", + "app", + "apple", + "aq", + "aquarelle", + "ar", + "arab", + "aramco", + "archi", + "army", + "arpa", + "art", + "arte", + "as", + "asda", + "asia", + "associates", + "at", + "athleta", + "attorney", + "au", + "auction", + "audi", + "audible", + "audio", + "auspost", + "author", + "auto", + "autos", + "avianca", + "aw", + "aws", + "ax", + "axa", + "az", + "azure", + "ba", + "baby", + "baidu", + "banamex", + "bananarepublic", + "band", + "bank", + "bar", + "barcelona", + "barclaycard", + "barclays", + "barefoot", + "bargains", + "baseball", + "basketball", + "bauhaus", + "bayern", + "bb", + "bbc", + "bbt", + "bbva", + "bcg", + "bcn", + "bd", + "be", + "beats", + "beauty", + "beer", + "bentley", + "berlin", + "best", + "bestbuy", + "bet", + "bf", + "bg", + "bh", + "bharti", + "bi", + "bible", + "bid", + "bike", + "bing", + "bingo", + "bio", + "biz", + "bj", + "black", + "blackfriday", + "blanco", + "blockbuster", + "blog", + "bloomberg", + "blue", + "bm", + "bms", + "bmw", + "bn", + "bnl", + 
"bnpparibas", + "bo", + "boats", + "boehringer", + "bofa", + "bom", + "bond", + "boo", + "book", + "booking", + "boots", + "bosch", + "bostik", + "boston", + "bot", + "boutique", + "box", + "br", + "bradesco", + "bridgestone", + "broadway", + "broker", + "brother", + "brussels", + "bs", + "bt", + "budapest", + "bugatti", + "build", + "builders", + "business", + "buy", + "buzz", + "bv", + "bw", + "by", + "bz", + "bzh", + "ca", + "cab", + "cafe", + "cal", + "call", + "calvinklein", + "cam", + "camera", + "camp", + "cancerresearch", + "canon", + "capetown", + "capital", + "capitalone", + "car", + "caravan", + "cards", + "care", + "career", + "careers", + "cars", + "cartier", + "casa", + "case", + "caseih", + "cash", + "casino", + "cat", + "catering", + "catholic", + "cba", + "cbn", + "cbre", + "cbs", + "cc", + "cd", + "ceb", + "center", + "ceo", + "cern", + "cf", + "cfa", + "cfd", + "cg", + "ch", + "chanel", + "channel", + "chase", + "chat", + "cheap", + "chintai", + "christmas", + "chrome", + "chrysler", + "church", + "ci", + "cipriani", + "circle", + "cisco", + "citadel", + "citi", + "citic", + "city", + "cityeats", + "ck", + "cl", + "claims", + "cleaning", + "click", + "clinic", + "clinique", + "clothing", + "cloud", + "club", + "clubmed", + "cm", + "cn", + "co", + "coach", + "codes", + "coffee", + "college", + "cologne", + "com", + "comcast", + "commbank", + "community", + "company", + "compare", + "computer", + "comsec", + "condos", + "construction", + "consulting", + "contact", + "contractors", + "cooking", + "cookingchannel", + "cool", + "coop", + "corsica", + "country", + "coupon", + "coupons", + "courses", + "cr", + "credit", + "creditcard", + "creditunion", + "cricket", + "crown", + "crs", + "cruise", + "cruises", + "csc", + "cu", + "cuisinella", + "cv", + "cw", + "cx", + "cy", + "cymru", + "cyou", + "cz", + "dabur", + "dad", + "dance", + "data", + "date", + "dating", + "datsun", + "day", + "dclk", + "dds", + "de", + "deal", + "dealer", + "deals", + "degree", + "delivery", + "dell", + "deloitte", + "delta", + "democrat", + "dental", + "dentist", + "desi", + "design", + "dev", + "dhl", + "diamonds", + "diet", + "digital", + "direct", + "directory", + "discount", + "discover", + "dish", + "diy", + "dj", + "dk", + "dm", + "dnp", + "do", + "docs", + "doctor", + "dodge", + "dog", + "doha", + "domains", + "dot", + "download", + "drive", + "dtv", + "dubai", + "duck", + "dunlop", + "duns", + "dupont", + "durban", + "dvag", + "dvr", + "dz", + "earth", + "eat", + "ec", + "eco", + "edeka", + "edu", + "education", + "ee", + "eg", + "email", + "emerck", + "energy", + "engineer", + "engineering", + "enterprises", + "epost", + "epson", + "equipment", + "er", + "ericsson", + "erni", + "es", + "esq", + "estate", + "esurance", + "et", + "etisalat", + "eu", + "eurovision", + "eus", + "events", + "everbank", + "exchange", + "expert", + "exposed", + "express", + "extraspace", + "fage", + "fail", + "fairwinds", + "faith", + "family", + "fan", + "fans", + "farm", + "farmers", + "fashion", + "fast", + "fedex", + "feedback", + "ferrari", + "ferrero", + "fi", + "fiat", + "fidelity", + "fido", + "film", + "final", + "finance", + "financial", + "fire", + "firestone", + "firmdale", + "fish", + "fishing", + "fit", + "fitness", + "fj", + "fk", + "flickr", + "flights", + "flir", + "florist", + "flowers", + "fly", + "fm", + "fo", + "foo", + "food", + "foodnetwork", + "football", + "ford", + "forex", + "forsale", + "forum", + "foundation", + "fox", + "fr", + "free", + "fresenius", + "frl", + "frogans", + 
"frontdoor", + "frontier", + "ftr", + "fujitsu", + "fujixerox", + "fun", + "fund", + "furniture", + "futbol", + "fyi", + "ga", + "gal", + "gallery", + "gallo", + "gallup", + "game", + "games", + "gap", + "garden", + "gb", + "gbiz", + "gd", + "gdn", + "ge", + "gea", + "gent", + "genting", + "george", + "gf", + "gg", + "ggee", + "gh", + "gi", + "gift", + "gifts", + "gives", + "giving", + "gl", + "glade", + "glass", + "gle", + "global", + "globo", + "gm", + "gmail", + "gmbh", + "gmo", + "gmx", + "gn", + "godaddy", + "gold", + "goldpoint", + "golf", + "goo", + "goodhands", + "goodyear", + "goog", + "google", + "gop", + "got", + "gov", + "gp", + "gq", + "gr", + "grainger", + "graphics", + "gratis", + "green", + "gripe", + "grocery", + "group", + "gs", + "gt", + "gu", + "guardian", + "gucci", + "guge", + "guide", + "guitars", + "guru", + "gw", + "gy", + "hair", + "hamburg", + "hangout", + "haus", + "hbo", + "hdfc", + "hdfcbank", + "health", + "healthcare", + "help", + "helsinki", + "here", + "hermes", + "hgtv", + "hiphop", + "hisamitsu", + "hitachi", + "hiv", + "hk", + "hkt", + "hm", + "hn", + "hockey", + "holdings", + "holiday", + "homedepot", + "homegoods", + "homes", + "homesense", + "honda", + "honeywell", + "horse", + "hospital", + "host", + "hosting", + "hot", + "hoteles", + "hotels", + "hotmail", + "house", + "how", + "hr", + "hsbc", + "ht", + "hu", + "hughes", + "hyatt", + "hyundai", + "ibm", + "icbc", + "ice", + "icu", + "id", + "ie", + "ieee", + "ifm", + "ikano", + "il", + "im", + "imamat", + "imdb", + "immo", + "immobilien", + "in", + "industries", + "infiniti", + "info", + "ing", + "ink", + "institute", + "insurance", + "insure", + "int", + "intel", + "international", + "intuit", + "investments", + "io", + "ipiranga", + "iq", + "ir", + "irish", + "is", + "iselect", + "ismaili", + "ist", + "istanbul", + "it", + "itau", + "itv", + "iveco", + "iwc", + "jaguar", + "java", + "jcb", + "jcp", + "je", + "jeep", + "jetzt", + "jewelry", + "jio", + "jlc", + "jll", + "jm", + "jmp", + "jnj", + "jo", + "jobs", + "joburg", + "jot", + "joy", + "jp", + "jpmorgan", + "jprs", + "juegos", + "juniper", + "kaufen", + "kddi", + "ke", + "kerryhotels", + "kerrylogistics", + "kerryproperties", + "kfh", + "kg", + "kh", + "ki", + "kia", + "kim", + "kinder", + "kindle", + "kitchen", + "kiwi", + "km", + "kn", + "koeln", + "komatsu", + "kosher", + "kp", + "kpmg", + "kpn", + "kr", + "krd", + "kred", + "kuokgroup", + "kw", + "ky", + "kyoto", + "kz", + "la", + "lacaixa", + "ladbrokes", + "lamborghini", + "lamer", + "lancaster", + "lancia", + "lancome", + "land", + "landrover", + "lanxess", + "lasalle", + "lat", + "latino", + "latrobe", + "law", + "lawyer", + "lb", + "lc", + "lds", + "lease", + "leclerc", + "lefrak", + "legal", + "lego", + "lexus", + "lgbt", + "li", + "liaison", + "lidl", + "life", + "lifeinsurance", + "lifestyle", + "lighting", + "like", + "lilly", + "limited", + "limo", + "lincoln", + "linde", + "link", + "lipsy", + "live", + "living", + "lixil", + "lk", + "loan", + "loans", + "locker", + "locus", + "loft", + "lol", + "london", + "lotte", + "lotto", + "love", + "lpl", + "lplfinancial", + "lr", + "ls", + "lt", + "ltd", + "ltda", + "lu", + "lundbeck", + "lupin", + "luxe", + "luxury", + "lv", + "ly", + "ma", + "macys", + "madrid", + "maif", + "maison", + "makeup", + "man", + "management", + "mango", + "map", + "market", + "marketing", + "markets", + "marriott", + "marshalls", + "maserati", + "mattel", + "mba", + "mc", + "mckinsey", + "md", + "me", + "med", + "media", + "meet", + "melbourne", + "meme", 
+ "memorial", + "men", + "menu", + "meo", + "merckmsd", + "metlife", + "mg", + "mh", + "miami", + "microsoft", + "mil", + "mini", + "mint", + "mit", + "mitsubishi", + "mk", + "ml", + "mlb", + "mls", + "mm", + "mma", + "mn", + "mo", + "mobi", + "mobile", + "mobily", + "moda", + "moe", + "moi", + "mom", + "monash", + "money", + "monster", + "mopar", + "mormon", + "mortgage", + "moscow", + "moto", + "motorcycles", + "mov", + "movie", + "movistar", + "mp", + "mq", + "mr", + "ms", + "msd", + "mt", + "mtn", + "mtpc", + "mtr", + "mu", + "museum", + "mutual", + "mv", + "mw", + "mx", + "my", + "mz", + "na", + "nab", + "nadex", + "nagoya", + "name", + "nationwide", + "natura", + "navy", + "nba", + "nc", + "ne", + "nec", + "net", + "netbank", + "netflix", + "network", + "neustar", + "new", + "newholland", + "news", + "next", + "nextdirect", + "nexus", + "nf", + "nfl", + "ng", + "ngo", + "nhk", + "ni", + "nico", + "nike", + "nikon", + "ninja", + "nissan", + "nissay", + "nl", + "no", + "nokia", + "northwesternmutual", + "norton", + "now", + "nowruz", + "nowtv", + "np", + "nr", + "nra", + "nrw", + "ntt", + "nu", + "nyc", + "nz", + "obi", + "observer", + "off", + "office", + "okinawa", + "olayan", + "olayangroup", + "oldnavy", + "ollo", + "om", + "omega", + "one", + "ong", + "onion", + "onl", + "online", + "onyourside", + "ooo", + "open", + "oracle", + "orange", + "org", + "organic", + "origins", + "osaka", + "otsuka", + "ott", + "ovh", + "pa", + "page", + "panasonic", + "panerai", + "paris", + "pars", + "partners", + "parts", + "party", + "passagens", + "pay", + "pccw", + "pe", + "pet", + "pf", + "pfizer", + "pg", + "ph", + "pharmacy", + "phd", + "philips", + "phone", + "photo", + "photography", + "photos", + "physio", + "piaget", + "pics", + "pictet", + "pictures", + "pid", + "pin", + "ping", + "pink", + "pioneer", + "pizza", + "pk", + "pl", + "place", + "play", + "playstation", + "plumbing", + "plus", + "pm", + "pn", + "pnc", + "pohl", + "poker", + "politie", + "porn", + "post", + "pr", + "pramerica", + "praxi", + "press", + "prime", + "pro", + "prod", + "productions", + "prof", + "progressive", + "promo", + "properties", + "property", + "protection", + "pru", + "prudential", + "ps", + "pt", + "pub", + "pw", + "pwc", + "py", + "qa", + "qpon", + "quebec", + "quest", + "qvc", + "racing", + "radio", + "raid", + "re", + "read", + "realestate", + "realtor", + "realty", + "recipes", + "red", + "redstone", + "redumbrella", + "rehab", + "reise", + "reisen", + "reit", + "reliance", + "ren", + "rent", + "rentals", + "repair", + "report", + "republican", + "rest", + "restaurant", + "review", + "reviews", + "rexroth", + "rich", + "richardli", + "ricoh", + "rightathome", + "ril", + "rio", + "rip", + "rmit", + "ro", + "rocher", + "rocks", + "rodeo", + "rogers", + "room", + "rs", + "rsvp", + "ru", + "rugby", + "ruhr", + "run", + "rw", + "rwe", + "ryukyu", + "sa", + "saarland", + "safe", + "safety", + "sakura", + "sale", + "salon", + "samsclub", + "samsung", + "sandvik", + "sandvikcoromant", + "sanofi", + "sap", + "sapo", + "sarl", + "sas", + "save", + "saxo", + "sb", + "sbi", + "sbs", + "sc", + "sca", + "scb", + "schaeffler", + "schmidt", + "scholarships", + "school", + "schule", + "schwarz", + "science", + "scjohnson", + "scor", + "scot", + "sd", + "se", + "search", + "seat", + "secure", + "security", + "seek", + "select", + "sener", + "services", + "ses", + "seven", + "sew", + "sex", + "sexy", + "sfr", + "sg", + "sh", + "shangrila", + "sharp", + "shaw", + "shell", + "shia", + "shiksha", + "shoes", + "shop", + 
"shopping", + "shouji", + "show", + "showtime", + "shriram", + "si", + "silk", + "sina", + "singles", + "site", + "sj", + "sk", + "ski", + "skin", + "sky", + "skype", + "sl", + "sling", + "sm", + "smart", + "smile", + "sn", + "sncf", + "so", + "soccer", + "social", + "softbank", + "software", + "sohu", + "solar", + "solutions", + "song", + "sony", + "soy", + "space", + "spiegel", + "spot", + "spreadbetting", + "sr", + "srl", + "srt", + "st", + "stada", + "staples", + "star", + "starhub", + "statebank", + "statefarm", + "statoil", + "stc", + "stcgroup", + "stockholm", + "storage", + "store", + "stream", + "studio", + "study", + "style", + "su", + "sucks", + "supplies", + "supply", + "support", + "surf", + "surgery", + "suzuki", + "sv", + "swatch", + "swiftcover", + "swiss", + "sx", + "sy", + "sydney", + "symantec", + "systems", + "sz", + "tab", + "taipei", + "talk", + "taobao", + "target", + "tatamotors", + "tatar", + "tattoo", + "tax", + "taxi", + "tc", + "tci", + "td", + "tdk", + "team", + "tech", + "technology", + "tel", + "telecity", + "telefonica", + "temasek", + "tennis", + "teva", + "tf", + "tg", + "th", + "thd", + "theater", + "theatre", + "tiaa", + "tickets", + "tienda", + "tiffany", + "tips", + "tires", + "tirol", + "tj", + "tjmaxx", + "tjx", + "tk", + "tkmaxx", + "tl", + "tm", + "tmall", + "tn", + "to", + "today", + "tokyo", + "tools", + "top", + "toray", + "toshiba", + "total", + "tours", + "town", + "toyota", + "toys", + "tr", + "trade", + "trading", + "training", + "travel", + "travelchannel", + "travelers", + "travelersinsurance", + "trust", + "trv", + "tt", + "tube", + "tui", + "tunes", + "tushu", + "tv", + "tvs", + "tw", + "tz", + "ua", + "ubank", + "ubs", + "uconnect", + "ug", + "uk", + "unicom", + "university", + "uno", + "uol", + "ups", + "us", + "uy", + "uz", + "va", + "vacations", + "vana", + "vanguard", + "vc", + "ve", + "vegas", + "ventures", + "verisign", + "versicherung", + "vet", + "vg", + "vi", + "viajes", + "video", + "vig", + "viking", + "villas", + "vin", + "vip", + "virgin", + "visa", + "vision", + "vista", + "vistaprint", + "viva", + "vivo", + "vlaanderen", + "vn", + "vodka", + "volkswagen", + "volvo", + "vote", + "voting", + "voto", + "voyage", + "vu", + "vuelos", + "wales", + "walmart", + "walter", + "wang", + "wanggou", + "warman", + "watch", + "watches", + "weather", + "weatherchannel", + "webcam", + "weber", + "website", + "wed", + "wedding", + "weibo", + "weir", + "wf", + "whoswho", + "wien", + "wiki", + "williamhill", + "win", + "windows", + "wine", + "winners", + "wme", + "wolterskluwer", + "woodside", + "work", + "works", + "world", + "wow", + "ws", + "wtc", + "wtf", + "xbox", + "xerox", + "xfinity", + "xihuan", + "xin", + "xn--11b4c3d", + "xn--1ck2e1b", + "xn--1qqw23a", + "xn--2scrj9c", + "xn--30rr7y", + "xn--3bst00m", + "xn--3ds443g", + "xn--3e0b707e", + "xn--3hcrj9c", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", + "xn--45br5cyl", + "xn--45brj9c", + "xn--45q11c", + "xn--4gbrim", + "xn--54b7fta0cc", + "xn--55qw42g", + "xn--55qx5d", + "xn--5su34j936bgsg", + "xn--5tzm5g", + "xn--6frz82g", + "xn--6qq986b3xl", + "xn--80adxhks", + "xn--80ao21a", + "xn--80aqecdr1a", + "xn--80asehdb", + "xn--80aswg", + "xn--8y0a063a", + "xn--90a3ac", + "xn--90ae", + "xn--90ais", + "xn--9dbq2a", + "xn--9et52u", + "xn--9krt00a", + "xn--b4w605ferd", + "xn--bck1b9a5dre4c", + "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", + "xn--cg4bki", + "xn--clchc0ea0b2g2a9gcd", + "xn--czr694b", + "xn--czrs0t", + "xn--czru2d", + "xn--d1acj3b", + "xn--d1alf", + "xn--e1a4c", + 
"xn--eckvdtc9d", + "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", + "xn--fiq228c5hs", + "xn--fiq64b", + "xn--fiqs8s", + "xn--fiqz9s", + "xn--fjq720a", + "xn--flw351e", + "xn--fpcrj9c3d", + "xn--fzc2c9e2c", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", + "xn--gecrj9c", + "xn--gk3at1e", + "xn--h2breg3eve", + "xn--h2brj9c", + "xn--h2brj9c8c", + "xn--hxt814e", + "xn--i1b6b1a6a2e", + "xn--imr513n", + "xn--io0a7i", + "xn--j1aef", + "xn--j1amh", + "xn--j6w193g", + "xn--jlq61u9w7b", + "xn--jvr189m", + "xn--kcrx77d1x4a", + "xn--kprw13d", + "xn--kpry57d", + "xn--kpu716f", + "xn--kput3i", + "xn--l1acc", + "xn--lgbbat1ad8j", + "xn--mgb2ddes", + "xn--mgb9awbf", + "xn--mgba3a3ejt", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "xn--mgba7c0bbn0a", + "xn--mgbaakc7dvf", + "xn--mgbaam7a8h", + "xn--mgbab2bd", + "xn--mgbai9a5eva00b", + "xn--mgbai9azgqp6j", + "xn--mgbayh7gpa", + "xn--mgbb9fbpob", + "xn--mgbbh1a71e", + "xn--mgbc0a9azcg", + "xn--mgbca7dzdo", + "xn--mgberp4a5d4a87g", + "xn--mgberp4a5d4ar", + "xn--mgbgu82a", + "xn--mgbi4ecexp", + "xn--mgbpl2fh", + "xn--mgbqly7c0a67fbc", + "xn--mgbqly7cvafr", + "xn--mgbt3dhd", + "xn--mgbtf8fl", + "xn--mgbtx2b", + "xn--mgbx4cd0ab", + "xn--mix082f", + "xn--mix891f", + "xn--mk1bu44c", + "xn--mxtq1m", + "xn--ngbc5azd", + "xn--ngbe9e0a", + "xn--ngbrx", + "xn--nnx388a", + "xn--node", + "xn--nqv7f", + "xn--nqv7fs00ema", + "xn--nyqy26a", + "xn--o3cw4h", + "xn--ogbpf8fl", + "xn--p1acf", + "xn--p1ai", + "xn--pbt977c", + "xn--pgbs0dh", + "xn--pssy2u", + "xn--q9jyb4c", + "xn--qcka1pmc", + "xn--qxam", + "xn--rhqv96g", + "xn--rovu88b", + "xn--rvc1e0am3e", + "xn--s9brj9c", + "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", + "xn--tiq49xqyj", + "xn--unup4y", + "xn--vermgensberater-ctb", + "xn--vermgensberatung-pwb", + "xn--vhquv", + "xn--vuq861b", + "xn--w4r85el8fhu5dnra", + "xn--w4rs40l", + "xn--wgbh1c", + "xn--wgbl6a", + "xn--xhq521b", + "xn--xkc2al3hye2a", + "xn--xkc2dl3a5ee0h", + "xn--y9a3aq", + "xn--yfro4i67o", + "xn--ygbi2ammx", + "xn--zfr164b", + "xperia", + "xxx", + "xyz", + "yachts", + "yahoo", + "yamaxun", + "yandex", + "ye", + "yodobashi", + "yoga", + "yokohama", + "you", + "youtube", + "yt", + "yun", + "za", + "zappos", + "zara", + "zero", + "zip", + "zippo", + "zm", + "zone", + "zuerich", + "zw", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "nom", + "ac", + "blogspot", + "co", + "gov", + "mil", + "net", + "nom", + "org", + "sch", + "accident-investigation", + "accident-prevention", + "aerobatic", + "aeroclub", + "aerodrome", + "agents", + "air-surveillance", + "air-traffic-control", + "aircraft", + "airline", + "airport", + "airtraffic", + "ambulance", + "amusement", + "association", + "author", + "ballooning", + "broker", + "caa", + "cargo", + "catering", + "certification", + "championship", + "charter", + "civilaviation", + "club", + "conference", + "consultant", + "consulting", + "control", + "council", + "crew", + "design", + "dgca", + "educator", + "emergency", + "engine", + "engineer", + "entertainment", + "equipment", + "exchange", + "express", + "federation", + "flight", + "freight", + "fuel", + "gliding", + "government", + "groundhandling", + "group", + "hanggliding", + "homebuilt", + "insurance", + "journal", + "journalist", + "leasing", + "logistics", + "magazine", + "maintenance", + "media", + "microlight", + "modelling", + "navigation", + "parachuting", + "paragliding", + "passenger-association", + "pilot", + "press", + "production", + "recreation", + "repbody", + "res", + "research", + "rotorcraft", + "safety", + 
"scientist", + "services", + "show", + "skydiving", + "software", + "student", + "trader", + "trading", + "trainer", + "union", + "workinggroup", + "works", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "net", + "nom", + "org", + "com", + "net", + "nom", + "off", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "blogspot", + "co", + "ed", + "gv", + "it", + "og", + "pb", + "com", + "edu", + "gob", + "gov", + "int", + "mil", + "musica", + "net", + "org", + "tur", + "blogspot", + "e164", + "in-addr", + "ip6", + "iris", + "uri", + "urn", + "gov", + "cloudns", + "12hp", + "2ix", + "4lima", + "ac", + "biz", + "co", + "futurecms", + "futurehosting", + "futuremailing", + "gv", + "info", + "lima-city", + "or", + "ortsinfo", + "priv", + "blogspot", + "ex", + "kunden", + "act", + "asn", + "com", + "conf", + "edu", + "gov", + "id", + "info", + "net", + "nsw", + "nt", + "org", + "oz", + "qld", + "sa", + "tas", + "vic", + "wa", + "blogspot", + "act", + "nsw", + "nt", + "qld", + "sa", + "tas", + "vic", + "wa", + "qld", + "sa", + "tas", + "vic", + "wa", + "com", + "biz", + "com", + "edu", + "gov", + "info", + "int", + "mil", + "name", + "net", + "org", + "pp", + "pro", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "biz", + "co", + "com", + "edu", + "gov", + "info", + "net", + "org", + "store", + "tv", + "ac", + "blogspot", + "transurl", + "webhosting", + "gov", + "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "a", + "b", + "barsy", + "blogspot", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "edu", + "or", + "org", + "cloudns", + "dscloud", + "dyndns", + "for-better", + "for-more", + "for-some", + "for-the", + "mmafan", + "myftp", + "no-ip", + "selfip", + "webhop", + "asso", + "barreau", + "blogspot", + "gouv", + "com", + "edu", + "gov", + "net", + "org", + "academia", + "agro", + "arte", + "blog", + "bolivia", + "ciencia", + "com", + "cooperativa", + "democracia", + "deporte", + "ecologia", + "economia", + "edu", + "empresa", + "gob", + "indigena", + "industria", + "info", + "int", + "medicina", + "mil", + "movimiento", + "musica", + "natural", + "net", + "nombre", + "noticias", + "org", + "patria", + "plurinacional", + "politica", + "profesional", + "pueblo", + "revista", + "salud", + "tecnologia", + "tksat", + "transporte", + "tv", + "web", + "wiki", + "9guacu", + "abc", + "adm", + "adv", + "agr", + "aju", + "am", + "anani", + "aparecida", + "arq", + "art", + "ato", + "b", + "belem", + "bhz", + "bio", + "blog", + "bmd", + "boavista", + "bsb", + "campinagrande", + "campinas", + "caxias", + "cim", + "cng", + "cnt", + "com", + "contagem", + "coop", + "cri", + "cuiaba", + "curitiba", + "def", + "ecn", + "eco", + "edu", + "emp", + "eng", + "esp", + "etc", + "eti", + "far", + "feira", + "flog", + "floripa", + "fm", + "fnd", + "fortal", + "fot", + "foz", + "fst", + "g12", + "ggf", + "goiania", + "gov", + "gru", + "imb", + "ind", + "inf", + "jab", + "jampa", + "jdf", + "joinville", + "jor", + "jus", + "leg", + "lel", + "londrina", + "macapa", + "maceio", + "manaus", + "maringa", + "mat", + "med", + "mil", + "morena", + "mp", + "mus", + "natal", + "net", + "niteroi", + "nom", + "not", + "ntr", + "odo", + "org", + "osasco", + "palmas", + "poa", + "ppg", + "pro", + "psc", + "psi", + "pvh", + "qsl", + "radio", + "rec", + "recife", 
+ "ribeirao", + "rio", + "riobranco", + "riopreto", + "salvador", + "sampa", + "santamaria", + "santoandre", + "saobernardo", + "saogonca", + "sjc", + "slg", + "slz", + "sorocaba", + "srv", + "taxi", + "teo", + "the", + "tmp", + "trd", + "tur", + "tv", + "udi", + "vet", + "vix", + "vlog", + "wiki", + "zlg", + "blogspot", + "ac", + "al", + "am", + "ap", + "ba", + "ce", + "df", + "es", + "go", + "ma", + "mg", + "ms", + "mt", + "pa", + "pb", + "pe", + "pi", + "pr", + "rj", + "rn", + "ro", + "rr", + "rs", + "sc", + "se", + "sp", + "to", + "ac", + "al", + "am", + "ap", + "ba", + "ce", + "df", + "es", + "go", + "ma", + "mg", + "ms", + "mt", + "pa", + "pb", + "pe", + "pi", + "pr", + "rj", + "rn", + "ro", + "rr", + "rs", + "sc", + "se", + "sp", + "to", + "com", + "edu", + "gov", + "net", + "org", + "we", + "com", + "edu", + "gov", + "net", + "org", + "co", + "org", + "com", + "gov", + "mil", + "nym", + "of", + "blogspot", + "com", + "edu", + "gov", + "net", + "nym", + "org", + "za", + "1password", + "ab", + "awdev", + "bc", + "blogspot", + "co", + "gc", + "mb", + "nb", + "nf", + "nl", + "no-ip", + "ns", + "nt", + "nu", + "on", + "pe", + "qc", + "sk", + "yk", + "cloudns", + "fantasyleague", + "ftpaccess", + "game-server", + "myphotos", + "scrapping", + "twmail", + "gov", + "blogspot", + "12hp", + "2ix", + "4lima", + "blogspot", + "gotdns", + "lima-city", + "square7", + "ac", + "asso", + "co", + "com", + "ed", + "edu", + "go", + "gouv", + "int", + "md", + "net", + "or", + "org", + "presse", + "xn--aroport-bya", + "www", + "blogspot", + "co", + "gob", + "gov", + "mil", + "nom", + "magentosite", + "sensiosite", + "statics", + "trafficplex", + "vapor", + "cloudns", + "co", + "com", + "gov", + "net", + "ac", + "ah", + "bj", + "com", + "cq", + "edu", + "fj", + "gd", + "gov", + "gs", + "gx", + "gz", + "ha", + "hb", + "he", + "hi", + "hk", + "hl", + "hn", + "jl", + "js", + "jx", + "ln", + "mil", + "mo", + "net", + "nm", + "nx", + "org", + "qh", + "sc", + "sd", + "sh", + "sn", + "sx", + "tj", + "tw", + "xj", + "xn--55qx5d", + "xn--io0a7i", + "xn--od0alg", + "xz", + "yn", + "zj", + "amazonaws", + "cn-north-1", + "compute", + "eb", + "elb", + "s3", + "cn-north-1", + "arts", + "com", + "edu", + "firm", + "gov", + "info", + "int", + "mil", + "net", + "nodum", + "nom", + "org", + "rec", + "web", + "blogspot", + "0emm", + "1kapp", + "1password", + "3utilities", + "4u", + "africa", + "alpha-myqnapcloud", + "amazonaws", + "appchizi", + "applinzi", + "appspot", + "ar", + "barsyonline", + "betainabox", + "bitballoon", + "blogdns", + "blogspot", + "blogsyte", + "bloxcms", + "bounty-full", + "bplaced", + "br", + "cechire", + "ciscofreak", + "cloudcontrolapp", + "cloudcontrolled", + "cn", + "co", + "codespot", + "damnserver", + "ddnsfree", + "ddnsgeek", + "ddnsking", + "de", + "dev-myqnapcloud", + "ditchyourip", + "dnsalias", + "dnsdojo", + "dnsiskinky", + "doesntexist", + "dontexist", + "doomdns", + "drayddns", + "dreamhosters", + "dsmynas", + "dyn-o-saur", + "dynalias", + "dyndns-at-home", + "dyndns-at-work", + "dyndns-blog", + "dyndns-free", + "dyndns-home", + "dyndns-ip", + "dyndns-mail", + "dyndns-office", + "dyndns-pics", + "dyndns-remote", + "dyndns-server", + "dyndns-web", + "dyndns-wiki", + "dyndns-work", + "dynns", + "elasticbeanstalk", + "est-a-la-maison", + "est-a-la-masion", + "est-le-patron", + "est-mon-blogueur", + "eu", + "evennode", + "familyds", + "fbsbx", + "firebaseapp", + "firewall-gateway", + "flynnhub", + "freebox-os", + "freeboxos", + "from-ak", + "from-al", + "from-ar", + "from-ca", + "from-ct", 
+ "from-dc", + "from-de", + "from-fl", + "from-ga", + "from-hi", + "from-ia", + "from-id", + "from-il", + "from-in", + "from-ks", + "from-ky", + "from-ma", + "from-md", + "from-mi", + "from-mn", + "from-mo", + "from-ms", + "from-mt", + "from-nc", + "from-nd", + "from-ne", + "from-nh", + "from-nj", + "from-nm", + "from-nv", + "from-oh", + "from-ok", + "from-or", + "from-pa", + "from-pr", + "from-ri", + "from-sc", + "from-sd", + "from-tn", + "from-tx", + "from-ut", + "from-va", + "from-vt", + "from-wa", + "from-wi", + "from-wv", + "from-wy", + "gb", + "geekgalaxy", + "getmyip", + "giize", + "githubusercontent", + "gleeze", + "googleapis", + "googlecode", + "gotdns", + "gotpantheon", + "gr", + "health-carereform", + "herokuapp", + "herokussl", + "hk", + "hobby-site", + "homelinux", + "homesecuritymac", + "homesecuritypc", + "homeunix", + "hu", + "iamallama", + "is-a-anarchist", + "is-a-blogger", + "is-a-bookkeeper", + "is-a-bulls-fan", + "is-a-caterer", + "is-a-chef", + "is-a-conservative", + "is-a-cpa", + "is-a-cubicle-slave", + "is-a-democrat", + "is-a-designer", + "is-a-doctor", + "is-a-financialadvisor", + "is-a-geek", + "is-a-green", + "is-a-guru", + "is-a-hard-worker", + "is-a-hunter", + "is-a-landscaper", + "is-a-lawyer", + "is-a-liberal", + "is-a-libertarian", + "is-a-llama", + "is-a-musician", + "is-a-nascarfan", + "is-a-nurse", + "is-a-painter", + "is-a-personaltrainer", + "is-a-photographer", + "is-a-player", + "is-a-republican", + "is-a-rockstar", + "is-a-socialist", + "is-a-student", + "is-a-teacher", + "is-a-techie", + "is-a-therapist", + "is-an-accountant", + "is-an-actor", + "is-an-actress", + "is-an-anarchist", + "is-an-artist", + "is-an-engineer", + "is-an-entertainer", + "is-certified", + "is-gone", + "is-into-anime", + "is-into-cars", + "is-into-cartoons", + "is-into-games", + "is-leet", + "is-not-certified", + "is-slick", + "is-uberleet", + "is-with-theband", + "isa-geek", + "isa-hockeynut", + "issmarterthanyou", + "jdevcloud", + "joyent", + "jpn", + "kozow", + "kr", + "likes-pie", + "likescandy", + "logoip", + "loseyourip", + "meteorapp", + "mex", + "myactivedirectory", + "myasustor", + "mydrobo", + "myqnapcloud", + "mysecuritycamera", + "myshopblocks", + "mytuleap", + "myvnc", + "neat-url", + "net-freaks", + "netlify", + "nfshost", + "no", + "on-aptible", + "onthewifi", + "ooguy", + "operaunite", + "outsystemscloud", + "ownprovider", + "pagefrontapp", + "pagespeedmobilizer", + "pgfog", + "pixolino", + "point2this", + "prgmr", + "publishproxy", + "qa2", + "qc", + "quicksytes", + "quipelements", + "rackmaze", + "remotewd", + "rhcloud", + "ru", + "sa", + "saves-the-whales", + "scrysec", + "se", + "securitytactics", + "selfip", + "sells-for-less", + "sells-for-u", + "servebbs", + "servebeer", + "servecounterstrike", + "serveexchange", + "serveftp", + "servegame", + "servehalflife", + "servehttp", + "servehumour", + "serveirc", + "servemp3", + "servep2p", + "servepics", + "servequake", + "servesarcasm", + "simple-url", + "sinaapp", + "space-to-rent", + "stufftoread", + "teaches-yoga", + "temp-dns", + "theworkpc", + "townnews-staging", + "uk", + "unusualperson", + "us", + "uy", + "vipsinaapp", + "withgoogle", + "withyoutube", + "workisboring", + "wpdevcloud", + "writesthisblog", + "xenapponazure", + "yolasite", + "za", + "ap-northeast-1", + "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "compute", + "compute-1", + "elb", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "s3", + "s3-ap-northeast-1", + 
"s3-ap-northeast-2", + "s3-ap-south-1", + "s3-ap-southeast-1", + "s3-ap-southeast-2", + "s3-ca-central-1", + "s3-eu-central-1", + "s3-eu-west-1", + "s3-eu-west-2", + "s3-eu-west-3", + "s3-external-1", + "s3-fips-us-gov-west-1", + "s3-sa-east-1", + "s3-us-east-2", + "s3-us-gov-west-1", + "s3-us-west-1", + "s3-us-west-2", + "s3-website-ap-northeast-1", + "s3-website-ap-southeast-1", + "s3-website-ap-southeast-2", + "s3-website-eu-west-1", + "s3-website-sa-east-1", + "s3-website-us-east-1", + "s3-website-us-west-1", + "s3-website-us-west-2", + "sa-east-1", + "us-east-1", + "us-east-2", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "alpha", + "beta", + "ap-northeast-1", + "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-west-1", + "us-west-1", + "us-west-2", + "eu-1", + "eu-2", + "eu-3", + "eu-4", + "us-1", + "us-2", + "us-3", + "us-4", + "apps", + "cns", + "eu", + "xen", + "de", + "ac", + "co", + "ed", + "fi", + "go", + "or", + "sa", + "com", + "edu", + "gov", + "inf", + "net", + "org", + "blogspot", + "com", + "edu", + "net", + "org", + "ath", + "gov", + "info", + "ac", + "biz", + "com", + "ekloges", + "gov", + "ltd", + "name", + "net", + "org", + "parliament", + "press", + "pro", + "tm", + "blogspot", + "blogspot", + "co", + "e4", + "metacentrum", + "realm", + "cloud", + "custom", + "12hp", + "2ix", + "4lima", + "barsy", + "blogspot", + "bplaced", + "com", + "cosidns", + "dd-dns", + "ddnss", + "dnshome", + "dnsupdater", + "dray-dns", + "draydns", + "dyn-ip24", + "dyn-vpn", + "dynamisches-dns", + "dyndns1", + "dynvpn", + "firewall-gateway", + "fuettertdasnetz", + "git-repos", + "goip", + "home-webserver", + "internet-dns", + "isteingeek", + "istmein", + "keymachine", + "l-o-g-i-n", + "lcube-server", + "lebtimnetz", + "leitungsen", + "lima-city", + "logoip", + "mein-vigor", + "my-gateway", + "my-router", + "my-vigor", + "my-wan", + "myhome-server", + "spdns", + "square7", + "svn-repos", + "syno-ds", + "synology-diskstation", + "synology-ds", + "taifun-dns", + "traeumtgerade", + "dyn", + "dyn", + "dyndns", + "dyn", + "biz", + "blogspot", + "co", + "firm", + "reg", + "store", + "com", + "edu", + "gov", + "net", + "org", + "art", + "com", + "edu", + "gob", + "gov", + "mil", + "net", + "org", + "sld", + "web", + "art", + "asso", + "com", + "edu", + "gov", + "net", + "org", + "pol", + "com", + "edu", + "fin", + "gob", + "gov", + "info", + "k12", + "med", + "mil", + "net", + "org", + "pro", + "aip", + "com", + "edu", + "fie", + "gov", + "lib", + "med", + "org", + "pri", + "riik", + "blogspot", + "com", + "edu", + "eun", + "gov", + "mil", + "name", + "net", + "org", + "sci", + "blogspot", + "com", + "edu", + "gob", + "nom", + "org", + "blogspot", + "compute", + "biz", + "com", + "edu", + "gov", + "info", + "name", + "net", + "org", + "1password", + "barsy", + "cloudns", + "diskstation", + "mycd", + "spdns", + "transurl", + "wellbeingzone", + "party", + "user", + "ybo", + "storj", + "aland", + "blogspot", + "dy", + "iki", + 
"ptplus", + "aeroport", + "assedic", + "asso", + "avocat", + "avoues", + "blogspot", + "cci", + "chambagri", + "chirurgiens-dentistes", + "chirurgiens-dentistes-en-france", + "com", + "experts-comptables", + "fbx-os", + "fbxos", + "freebox-os", + "freeboxos", + "geometre-expert", + "gouv", + "greta", + "huissier-justice", + "medecin", + "nom", + "notaires", + "on-web", + "pharmacien", + "port", + "prd", + "presse", + "tm", + "veterinaire", + "nom", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "pvt", + "co", + "cya", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "org", + "com", + "edu", + "gov", + "ltd", + "mod", + "org", + "co", + "com", + "edu", + "net", + "nom", + "org", + "ac", + "com", + "edu", + "gov", + "net", + "org", + "cloud", + "asso", + "com", + "edu", + "mobi", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "net", + "nym", + "org", + "com", + "edu", + "gob", + "ind", + "mil", + "net", + "nom", + "org", + "co", + "com", + "edu", + "gov", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "idv", + "inc", + "ltd", + "net", + "org", + "xn--55qx5d", + "xn--ciqpn", + "xn--gmq050i", + "xn--gmqw5a", + "xn--io0a7i", + "xn--lcvr32d", + "xn--mk0axi", + "xn--mxtq1m", + "xn--od0alg", + "xn--od0aq3b", + "xn--tn0ag", + "xn--uc0atv", + "xn--uc0ay4a", + "xn--wcvs22d", + "xn--zf0avx", + "com", + "edu", + "gob", + "mil", + "net", + "nom", + "org", + "cloudaccess", + "freesite", + "opencraft", + "blogspot", + "com", + "from", + "iz", + "name", + "adult", + "art", + "asso", + "com", + "coop", + "edu", + "firm", + "gouv", + "info", + "med", + "net", + "org", + "perso", + "pol", + "pro", + "rel", + "shop", + "2000", + "agrar", + "blogspot", + "bolt", + "casino", + "city", + "co", + "erotica", + "erotika", + "film", + "forum", + "games", + "hotel", + "info", + "ingatlan", + "jogasz", + "konyvelo", + "lakas", + "media", + "news", + "org", + "priv", + "reklam", + "sex", + "shop", + "sport", + "suli", + "szex", + "tm", + "tozsde", + "utazas", + "video", + "ac", + "biz", + "co", + "desa", + "go", + "mil", + "my", + "net", + "or", + "sch", + "web", + "blogspot", + "blogspot", + "gov", + "ac", + "co", + "gov", + "idf", + "k12", + "muni", + "net", + "org", + "blogspot", + "ac", + "co", + "com", + "net", + "nom", + "org", + "ro", + "tt", + "tv", + "ltd", + "plc", + "ac", + "barsy", + "blogspot", + "cloudns", + "co", + "edu", + "firm", + "gen", + "gov", + "ind", + "mil", + "net", + "nic", + "org", + "res", + "barrel-of-knowledge", + "barrell-of-knowledge", + "cloudns", + "dvrcam", + "dynamic-dns", + "dyndns", + "for-our", + "groks-the", + "groks-this", + "here-for-more", + "ilovecollege", + "knowsitall", + "no-ip", + "nsupdate", + "selfip", + "v-info", + "webhop", + "eu", + "backplaneapp", + "boxfuse", + "browsersafetymark", + "com", + "dedyn", + "definima", + "drud", + "enonic", + "github", + "gitlab", + "hasura-app", + "hzc", + "lair", + "ngrok", + "nid", + "nodeart", + "nodum", + "pantheonsite", + "protonet", + "resindevice", + "resinstaging", + "s5y", + "sandcats", + "shiftedit", + "spacekit", + "stolos", + "thingdust", + "vaporcloud", + "wedeploy", + "customer", + "apps", + "stage", + "devices", + "dev", + "disrec", + "prod", + "testing", + "cust", + "cust", + "cust", + "cust", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "ac", + "co", + "gov", + "id", + "net", + "org", + "sch", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "blogspot", + "com", + "cupcake", + "edu", + "gov", + "int", + "net", + "org", + "abr", + "abruzzo", + "ag", + "agrigento", + "al", 
+ "alessandria", + "alto-adige", + "altoadige", + "an", + "ancona", + "andria-barletta-trani", + "andria-trani-barletta", + "andriabarlettatrani", + "andriatranibarletta", + "ao", + "aosta", + "aosta-valley", + "aostavalley", + "aoste", + "ap", + "aq", + "aquila", + "ar", + "arezzo", + "ascoli-piceno", + "ascolipiceno", + "asti", + "at", + "av", + "avellino", + "ba", + "balsan", + "bari", + "barletta-trani-andria", + "barlettatraniandria", + "bas", + "basilicata", + "belluno", + "benevento", + "bergamo", + "bg", + "bi", + "biella", + "bl", + "blogspot", + "bn", + "bo", + "bologna", + "bolzano", + "bozen", + "br", + "brescia", + "brindisi", + "bs", + "bt", + "bz", + "ca", + "cagliari", + "cal", + "calabria", + "caltanissetta", + "cam", + "campania", + "campidano-medio", + "campidanomedio", + "campobasso", + "carbonia-iglesias", + "carboniaiglesias", + "carrara-massa", + "carraramassa", + "caserta", + "catania", + "catanzaro", + "cb", + "ce", + "cesena-forli", + "cesenaforli", + "ch", + "chieti", + "ci", + "cl", + "cn", + "co", + "como", + "cosenza", + "cr", + "cremona", + "crotone", + "cs", + "ct", + "cuneo", + "cz", + "dell-ogliastra", + "dellogliastra", + "edu", + "emilia-romagna", + "emiliaromagna", + "emr", + "en", + "enna", + "fc", + "fe", + "fermo", + "ferrara", + "fg", + "fi", + "firenze", + "florence", + "fm", + "foggia", + "forli-cesena", + "forlicesena", + "fr", + "friuli-v-giulia", + "friuli-ve-giulia", + "friuli-vegiulia", + "friuli-venezia-giulia", + "friuli-veneziagiulia", + "friuli-vgiulia", + "friuliv-giulia", + "friulive-giulia", + "friulivegiulia", + "friulivenezia-giulia", + "friuliveneziagiulia", + "friulivgiulia", + "frosinone", + "fvg", + "ge", + "genoa", + "genova", + "go", + "gorizia", + "gov", + "gr", + "grosseto", + "iglesias-carbonia", + "iglesiascarbonia", + "im", + "imperia", + "is", + "isernia", + "kr", + "la-spezia", + "laquila", + "laspezia", + "latina", + "laz", + "lazio", + "lc", + "le", + "lecce", + "lecco", + "li", + "lig", + "liguria", + "livorno", + "lo", + "lodi", + "lom", + "lombardia", + "lombardy", + "lt", + "lu", + "lucania", + "lucca", + "macerata", + "mantova", + "mar", + "marche", + "massa-carrara", + "massacarrara", + "matera", + "mb", + "mc", + "me", + "medio-campidano", + "mediocampidano", + "messina", + "mi", + "milan", + "milano", + "mn", + "mo", + "modena", + "mol", + "molise", + "monza", + "monza-brianza", + "monza-e-della-brianza", + "monzabrianza", + "monzaebrianza", + "monzaedellabrianza", + "ms", + "mt", + "na", + "naples", + "napoli", + "no", + "novara", + "nu", + "nuoro", + "og", + "ogliastra", + "olbia-tempio", + "olbiatempio", + "or", + "oristano", + "ot", + "pa", + "padova", + "padua", + "palermo", + "parma", + "pavia", + "pc", + "pd", + "pe", + "perugia", + "pesaro-urbino", + "pesarourbino", + "pescara", + "pg", + "pi", + "piacenza", + "piedmont", + "piemonte", + "pisa", + "pistoia", + "pmn", + "pn", + "po", + "pordenone", + "potenza", + "pr", + "prato", + "pt", + "pu", + "pug", + "puglia", + "pv", + "pz", + "ra", + "ragusa", + "ravenna", + "rc", + "re", + "reggio-calabria", + "reggio-emilia", + "reggiocalabria", + "reggioemilia", + "rg", + "ri", + "rieti", + "rimini", + "rm", + "rn", + "ro", + "roma", + "rome", + "rovigo", + "sa", + "salerno", + "sar", + "sardegna", + "sardinia", + "sassari", + "savona", + "si", + "sic", + "sicilia", + "sicily", + "siena", + "siracusa", + "so", + "sondrio", + "sp", + "sr", + "ss", + "suedtirol", + "sv", + "ta", + "taa", + "taranto", + "te", + "tempio-olbia", + "tempioolbia", + "teramo", + 
"terni", + "tn", + "to", + "torino", + "tos", + "toscana", + "tp", + "tr", + "trani-andria-barletta", + "trani-barletta-andria", + "traniandriabarletta", + "tranibarlettaandria", + "trapani", + "trentino", + "trentino-a-adige", + "trentino-aadige", + "trentino-alto-adige", + "trentino-altoadige", + "trentino-s-tirol", + "trentino-stirol", + "trentino-sud-tirol", + "trentino-sudtirol", + "trentino-sued-tirol", + "trentino-suedtirol", + "trentinoa-adige", + "trentinoaadige", + "trentinoalto-adige", + "trentinoaltoadige", + "trentinos-tirol", + "trentinostirol", + "trentinosud-tirol", + "trentinosudtirol", + "trentinosued-tirol", + "trentinosuedtirol", + "trento", + "treviso", + "trieste", + "ts", + "turin", + "tuscany", + "tv", + "ud", + "udine", + "umb", + "umbria", + "urbino-pesaro", + "urbinopesaro", + "va", + "val-d-aosta", + "val-daosta", + "vald-aosta", + "valdaosta", + "valle-aosta", + "valle-d-aosta", + "valle-daosta", + "valleaosta", + "valled-aosta", + "valledaosta", + "vallee-aoste", + "valleeaoste", + "vao", + "varese", + "vb", + "vc", + "vda", + "ve", + "ven", + "veneto", + "venezia", + "venice", + "verbania", + "vercelli", + "verona", + "vi", + "vibo-valentia", + "vibovalentia", + "vicenza", + "viterbo", + "vr", + "vs", + "vt", + "vv", + "co", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "sch", + "ac", + "ad", + "aichi", + "akita", + "aomori", + "blogspot", + "chiba", + "co", + "ed", + "ehime", + "fukui", + "fukuoka", + "fukushima", + "gifu", + "go", + "gr", + "gunma", + "hiroshima", + "hokkaido", + "hyogo", + "ibaraki", + "ishikawa", + "iwate", + "kagawa", + "kagoshima", + "kanagawa", + "kawasaki", + "kitakyushu", + "kobe", + "kochi", + "kumamoto", + "kyoto", + "lg", + "mie", + "miyagi", + "miyazaki", + "nagano", + "nagasaki", + "nagoya", + "nara", + "ne", + "niigata", + "oita", + "okayama", + "okinawa", + "or", + "osaka", + "saga", + "saitama", + "sapporo", + "sendai", + "shiga", + "shimane", + "shizuoka", + "tochigi", + "tokushima", + "tokyo", + "tottori", + "toyama", + "wakayama", + "xn--0trq7p7nn", + "xn--1ctwo", + "xn--1lqs03n", + "xn--1lqs71d", + "xn--2m4a15e", + "xn--32vp30h", + "xn--4it168d", + "xn--4it797k", + "xn--4pvxs", + "xn--5js045d", + "xn--5rtp49c", + "xn--5rtq34k", + "xn--6btw5a", + "xn--6orx2r", + "xn--7t0a264c", + "xn--8ltr62k", + "xn--8pvr4u", + "xn--c3s14m", + "xn--d5qv7z876c", + "xn--djrs72d6uy", + "xn--djty4k", + "xn--efvn9s", + "xn--ehqz56n", + "xn--elqq16h", + "xn--f6qx53a", + "xn--k7yn95e", + "xn--kbrq7o", + "xn--klt787d", + "xn--kltp7d", + "xn--kltx9a", + "xn--klty5x", + "xn--mkru45i", + "xn--nit225k", + "xn--ntso0iqx3a", + "xn--ntsq17g", + "xn--pssu33l", + "xn--qqqt11m", + "xn--rht27z", + "xn--rht3d", + "xn--rht61e", + "xn--rny31h", + "xn--tor131o", + "xn--uist22h", + "xn--uisz3g", + "xn--uuwu58a", + "xn--vgu402c", + "xn--zbx025d", + "yamagata", + "yamaguchi", + "yamanashi", + "yokohama", + "aisai", + "ama", + "anjo", + "asuke", + "chiryu", + "chita", + "fuso", + "gamagori", + "handa", + "hazu", + "hekinan", + "higashiura", + "ichinomiya", + "inazawa", + "inuyama", + "isshiki", + "iwakura", + "kanie", + "kariya", + "kasugai", + "kira", + "kiyosu", + "komaki", + "konan", + "kota", + "mihama", + "miyoshi", + "nishio", + "nisshin", + "obu", + "oguchi", + "oharu", + "okazaki", + "owariasahi", + "seto", + "shikatsu", + "shinshiro", + "shitara", + "tahara", + "takahama", + "tobishima", + "toei", + "togo", + "tokai", + "tokoname", + "toyoake", + "toyohashi", + "toyokawa", + "toyone", + "toyota", + "tsushima", + 
"yatomi", + "akita", + "daisen", + "fujisato", + "gojome", + "hachirogata", + "happou", + "higashinaruse", + "honjo", + "honjyo", + "ikawa", + "kamikoani", + "kamioka", + "katagami", + "kazuno", + "kitaakita", + "kosaka", + "kyowa", + "misato", + "mitane", + "moriyoshi", + "nikaho", + "noshiro", + "odate", + "oga", + "ogata", + "semboku", + "yokote", + "yurihonjo", + "aomori", + "gonohe", + "hachinohe", + "hashikami", + "hiranai", + "hirosaki", + "itayanagi", + "kuroishi", + "misawa", + "mutsu", + "nakadomari", + "noheji", + "oirase", + "owani", + "rokunohe", + "sannohe", + "shichinohe", + "shingo", + "takko", + "towada", + "tsugaru", + "tsuruta", + "abiko", + "asahi", + "chonan", + "chosei", + "choshi", + "chuo", + "funabashi", + "futtsu", + "hanamigawa", + "ichihara", + "ichikawa", + "ichinomiya", + "inzai", + "isumi", + "kamagaya", + "kamogawa", + "kashiwa", + "katori", + "katsuura", + "kimitsu", + "kisarazu", + "kozaki", + "kujukuri", + "kyonan", + "matsudo", + "midori", + "mihama", + "minamiboso", + "mobara", + "mutsuzawa", + "nagara", + "nagareyama", + "narashino", + "narita", + "noda", + "oamishirasato", + "omigawa", + "onjuku", + "otaki", + "sakae", + "sakura", + "shimofusa", + "shirako", + "shiroi", + "shisui", + "sodegaura", + "sosa", + "tako", + "tateyama", + "togane", + "tohnosho", + "tomisato", + "urayasu", + "yachimata", + "yachiyo", + "yokaichiba", + "yokoshibahikari", + "yotsukaido", + "ainan", + "honai", + "ikata", + "imabari", + "iyo", + "kamijima", + "kihoku", + "kumakogen", + "masaki", + "matsuno", + "matsuyama", + "namikata", + "niihama", + "ozu", + "saijo", + "seiyo", + "shikokuchuo", + "tobe", + "toon", + "uchiko", + "uwajima", + "yawatahama", + "echizen", + "eiheiji", + "fukui", + "ikeda", + "katsuyama", + "mihama", + "minamiechizen", + "obama", + "ohi", + "ono", + "sabae", + "sakai", + "takahama", + "tsuruga", + "wakasa", + "ashiya", + "buzen", + "chikugo", + "chikuho", + "chikujo", + "chikushino", + "chikuzen", + "chuo", + "dazaifu", + "fukuchi", + "hakata", + "higashi", + "hirokawa", + "hisayama", + "iizuka", + "inatsuki", + "kaho", + "kasuga", + "kasuya", + "kawara", + "keisen", + "koga", + "kurate", + "kurogi", + "kurume", + "minami", + "miyako", + "miyama", + "miyawaka", + "mizumaki", + "munakata", + "nakagawa", + "nakama", + "nishi", + "nogata", + "ogori", + "okagaki", + "okawa", + "oki", + "omuta", + "onga", + "onojo", + "oto", + "saigawa", + "sasaguri", + "shingu", + "shinyoshitomi", + "shonai", + "soeda", + "sue", + "tachiarai", + "tagawa", + "takata", + "toho", + "toyotsu", + "tsuiki", + "ukiha", + "umi", + "usui", + "yamada", + "yame", + "yanagawa", + "yukuhashi", + "aizubange", + "aizumisato", + "aizuwakamatsu", + "asakawa", + "bandai", + "date", + "fukushima", + "furudono", + "futaba", + "hanawa", + "higashi", + "hirata", + "hirono", + "iitate", + "inawashiro", + "ishikawa", + "iwaki", + "izumizaki", + "kagamiishi", + "kaneyama", + "kawamata", + "kitakata", + "kitashiobara", + "koori", + "koriyama", + "kunimi", + "miharu", + "mishima", + "namie", + "nango", + "nishiaizu", + "nishigo", + "okuma", + "omotego", + "ono", + "otama", + "samegawa", + "shimogo", + "shirakawa", + "showa", + "soma", + "sukagawa", + "taishin", + "tamakawa", + "tanagura", + "tenei", + "yabuki", + "yamato", + "yamatsuri", + "yanaizu", + "yugawa", + "anpachi", + "ena", + "gifu", + "ginan", + "godo", + "gujo", + "hashima", + "hichiso", + "hida", + "higashishirakawa", + "ibigawa", + "ikeda", + "kakamigahara", + "kani", + "kasahara", + "kasamatsu", + "kawaue", + "kitagata", + "mino", + 
"minokamo", + "mitake", + "mizunami", + "motosu", + "nakatsugawa", + "ogaki", + "sakahogi", + "seki", + "sekigahara", + "shirakawa", + "tajimi", + "takayama", + "tarui", + "toki", + "tomika", + "wanouchi", + "yamagata", + "yaotsu", + "yoro", + "annaka", + "chiyoda", + "fujioka", + "higashiagatsuma", + "isesaki", + "itakura", + "kanna", + "kanra", + "katashina", + "kawaba", + "kiryu", + "kusatsu", + "maebashi", + "meiwa", + "midori", + "minakami", + "naganohara", + "nakanojo", + "nanmoku", + "numata", + "oizumi", + "ora", + "ota", + "shibukawa", + "shimonita", + "shinto", + "showa", + "takasaki", + "takayama", + "tamamura", + "tatebayashi", + "tomioka", + "tsukiyono", + "tsumagoi", + "ueno", + "yoshioka", + "asaminami", + "daiwa", + "etajima", + "fuchu", + "fukuyama", + "hatsukaichi", + "higashihiroshima", + "hongo", + "jinsekikogen", + "kaita", + "kui", + "kumano", + "kure", + "mihara", + "miyoshi", + "naka", + "onomichi", + "osakikamijima", + "otake", + "saka", + "sera", + "seranishi", + "shinichi", + "shobara", + "takehara", + "abashiri", + "abira", + "aibetsu", + "akabira", + "akkeshi", + "asahikawa", + "ashibetsu", + "ashoro", + "assabu", + "atsuma", + "bibai", + "biei", + "bifuka", + "bihoro", + "biratori", + "chippubetsu", + "chitose", + "date", + "ebetsu", + "embetsu", + "eniwa", + "erimo", + "esan", + "esashi", + "fukagawa", + "fukushima", + "furano", + "furubira", + "haboro", + "hakodate", + "hamatonbetsu", + "hidaka", + "higashikagura", + "higashikawa", + "hiroo", + "hokuryu", + "hokuto", + "honbetsu", + "horokanai", + "horonobe", + "ikeda", + "imakane", + "ishikari", + "iwamizawa", + "iwanai", + "kamifurano", + "kamikawa", + "kamishihoro", + "kamisunagawa", + "kamoenai", + "kayabe", + "kembuchi", + "kikonai", + "kimobetsu", + "kitahiroshima", + "kitami", + "kiyosato", + "koshimizu", + "kunneppu", + "kuriyama", + "kuromatsunai", + "kushiro", + "kutchan", + "kyowa", + "mashike", + "matsumae", + "mikasa", + "minamifurano", + "mombetsu", + "moseushi", + "mukawa", + "muroran", + "naie", + "nakagawa", + "nakasatsunai", + "nakatombetsu", + "nanae", + "nanporo", + "nayoro", + "nemuro", + "niikappu", + "niki", + "nishiokoppe", + "noboribetsu", + "numata", + "obihiro", + "obira", + "oketo", + "okoppe", + "otaru", + "otobe", + "otofuke", + "otoineppu", + "oumu", + "ozora", + "pippu", + "rankoshi", + "rebun", + "rikubetsu", + "rishiri", + "rishirifuji", + "saroma", + "sarufutsu", + "shakotan", + "shari", + "shibecha", + "shibetsu", + "shikabe", + "shikaoi", + "shimamaki", + "shimizu", + "shimokawa", + "shinshinotsu", + "shintoku", + "shiranuka", + "shiraoi", + "shiriuchi", + "sobetsu", + "sunagawa", + "taiki", + "takasu", + "takikawa", + "takinoue", + "teshikaga", + "tobetsu", + "tohma", + "tomakomai", + "tomari", + "toya", + "toyako", + "toyotomi", + "toyoura", + "tsubetsu", + "tsukigata", + "urakawa", + "urausu", + "uryu", + "utashinai", + "wakkanai", + "wassamu", + "yakumo", + "yoichi", + "aioi", + "akashi", + "ako", + "amagasaki", + "aogaki", + "asago", + "ashiya", + "awaji", + "fukusaki", + "goshiki", + "harima", + "himeji", + "ichikawa", + "inagawa", + "itami", + "kakogawa", + "kamigori", + "kamikawa", + "kasai", + "kasuga", + "kawanishi", + "miki", + "minamiawaji", + "nishinomiya", + "nishiwaki", + "ono", + "sanda", + "sannan", + "sasayama", + "sayo", + "shingu", + "shinonsen", + "shiso", + "sumoto", + "taishi", + "taka", + "takarazuka", + "takasago", + "takino", + "tamba", + "tatsuno", + "toyooka", + "yabu", + "yashiro", + "yoka", + "yokawa", + "ami", + "asahi", + "bando", + 
"chikusei", + "daigo", + "fujishiro", + "hitachi", + "hitachinaka", + "hitachiomiya", + "hitachiota", + "ibaraki", + "ina", + "inashiki", + "itako", + "iwama", + "joso", + "kamisu", + "kasama", + "kashima", + "kasumigaura", + "koga", + "miho", + "mito", + "moriya", + "naka", + "namegata", + "oarai", + "ogawa", + "omitama", + "ryugasaki", + "sakai", + "sakuragawa", + "shimodate", + "shimotsuma", + "shirosato", + "sowa", + "suifu", + "takahagi", + "tamatsukuri", + "tokai", + "tomobe", + "tone", + "toride", + "tsuchiura", + "tsukuba", + "uchihara", + "ushiku", + "yachiyo", + "yamagata", + "yawara", + "yuki", + "anamizu", + "hakui", + "hakusan", + "kaga", + "kahoku", + "kanazawa", + "kawakita", + "komatsu", + "nakanoto", + "nanao", + "nomi", + "nonoichi", + "noto", + "shika", + "suzu", + "tsubata", + "tsurugi", + "uchinada", + "wajima", + "fudai", + "fujisawa", + "hanamaki", + "hiraizumi", + "hirono", + "ichinohe", + "ichinoseki", + "iwaizumi", + "iwate", + "joboji", + "kamaishi", + "kanegasaki", + "karumai", + "kawai", + "kitakami", + "kuji", + "kunohe", + "kuzumaki", + "miyako", + "mizusawa", + "morioka", + "ninohe", + "noda", + "ofunato", + "oshu", + "otsuchi", + "rikuzentakata", + "shiwa", + "shizukuishi", + "sumita", + "tanohata", + "tono", + "yahaba", + "yamada", + "ayagawa", + "higashikagawa", + "kanonji", + "kotohira", + "manno", + "marugame", + "mitoyo", + "naoshima", + "sanuki", + "tadotsu", + "takamatsu", + "tonosho", + "uchinomi", + "utazu", + "zentsuji", + "akune", + "amami", + "hioki", + "isa", + "isen", + "izumi", + "kagoshima", + "kanoya", + "kawanabe", + "kinko", + "kouyama", + "makurazaki", + "matsumoto", + "minamitane", + "nakatane", + "nishinoomote", + "satsumasendai", + "soo", + "tarumizu", + "yusui", + "aikawa", + "atsugi", + "ayase", + "chigasaki", + "ebina", + "fujisawa", + "hadano", + "hakone", + "hiratsuka", + "isehara", + "kaisei", + "kamakura", + "kiyokawa", + "matsuda", + "minamiashigara", + "miura", + "nakai", + "ninomiya", + "odawara", + "oi", + "oiso", + "sagamihara", + "samukawa", + "tsukui", + "yamakita", + "yamato", + "yokosuka", + "yugawara", + "zama", + "zushi", + "city", + "city", + "city", + "aki", + "geisei", + "hidaka", + "higashitsuno", + "ino", + "kagami", + "kami", + "kitagawa", + "kochi", + "mihara", + "motoyama", + "muroto", + "nahari", + "nakamura", + "nankoku", + "nishitosa", + "niyodogawa", + "ochi", + "okawa", + "otoyo", + "otsuki", + "sakawa", + "sukumo", + "susaki", + "tosa", + "tosashimizu", + "toyo", + "tsuno", + "umaji", + "yasuda", + "yusuhara", + "amakusa", + "arao", + "aso", + "choyo", + "gyokuto", + "kamiamakusa", + "kikuchi", + "kumamoto", + "mashiki", + "mifune", + "minamata", + "minamioguni", + "nagasu", + "nishihara", + "oguni", + "ozu", + "sumoto", + "takamori", + "uki", + "uto", + "yamaga", + "yamato", + "yatsushiro", + "ayabe", + "fukuchiyama", + "higashiyama", + "ide", + "ine", + "joyo", + "kameoka", + "kamo", + "kita", + "kizu", + "kumiyama", + "kyotamba", + "kyotanabe", + "kyotango", + "maizuru", + "minami", + "minamiyamashiro", + "miyazu", + "muko", + "nagaokakyo", + "nakagyo", + "nantan", + "oyamazaki", + "sakyo", + "seika", + "tanabe", + "uji", + "ujitawara", + "wazuka", + "yamashina", + "yawata", + "asahi", + "inabe", + "ise", + "kameyama", + "kawagoe", + "kiho", + "kisosaki", + "kiwa", + "komono", + "kumano", + "kuwana", + "matsusaka", + "meiwa", + "mihama", + "minamiise", + "misugi", + "miyama", + "nabari", + "shima", + "suzuka", + "tado", + "taiki", + "taki", + "tamaki", + "toba", + "tsu", + "udono", + "ureshino", + 
"watarai", + "yokkaichi", + "furukawa", + "higashimatsushima", + "ishinomaki", + "iwanuma", + "kakuda", + "kami", + "kawasaki", + "marumori", + "matsushima", + "minamisanriku", + "misato", + "murata", + "natori", + "ogawara", + "ohira", + "onagawa", + "osaki", + "rifu", + "semine", + "shibata", + "shichikashuku", + "shikama", + "shiogama", + "shiroishi", + "tagajo", + "taiwa", + "tome", + "tomiya", + "wakuya", + "watari", + "yamamoto", + "zao", + "aya", + "ebino", + "gokase", + "hyuga", + "kadogawa", + "kawaminami", + "kijo", + "kitagawa", + "kitakata", + "kitaura", + "kobayashi", + "kunitomi", + "kushima", + "mimata", + "miyakonojo", + "miyazaki", + "morotsuka", + "nichinan", + "nishimera", + "nobeoka", + "saito", + "shiiba", + "shintomi", + "takaharu", + "takanabe", + "takazaki", + "tsuno", + "achi", + "agematsu", + "anan", + "aoki", + "asahi", + "azumino", + "chikuhoku", + "chikuma", + "chino", + "fujimi", + "hakuba", + "hara", + "hiraya", + "iida", + "iijima", + "iiyama", + "iizuna", + "ikeda", + "ikusaka", + "ina", + "karuizawa", + "kawakami", + "kiso", + "kisofukushima", + "kitaaiki", + "komagane", + "komoro", + "matsukawa", + "matsumoto", + "miasa", + "minamiaiki", + "minamimaki", + "minamiminowa", + "minowa", + "miyada", + "miyota", + "mochizuki", + "nagano", + "nagawa", + "nagiso", + "nakagawa", + "nakano", + "nozawaonsen", + "obuse", + "ogawa", + "okaya", + "omachi", + "omi", + "ookuwa", + "ooshika", + "otaki", + "otari", + "sakae", + "sakaki", + "saku", + "sakuho", + "shimosuwa", + "shinanomachi", + "shiojiri", + "suwa", + "suzaka", + "takagi", + "takamori", + "takayama", + "tateshina", + "tatsuno", + "togakushi", + "togura", + "tomi", + "ueda", + "wada", + "yamagata", + "yamanouchi", + "yasaka", + "yasuoka", + "chijiwa", + "futsu", + "goto", + "hasami", + "hirado", + "iki", + "isahaya", + "kawatana", + "kuchinotsu", + "matsuura", + "nagasaki", + "obama", + "omura", + "oseto", + "saikai", + "sasebo", + "seihi", + "shimabara", + "shinkamigoto", + "togitsu", + "tsushima", + "unzen", + "city", + "ando", + "gose", + "heguri", + "higashiyoshino", + "ikaruga", + "ikoma", + "kamikitayama", + "kanmaki", + "kashiba", + "kashihara", + "katsuragi", + "kawai", + "kawakami", + "kawanishi", + "koryo", + "kurotaki", + "mitsue", + "miyake", + "nara", + "nosegawa", + "oji", + "ouda", + "oyodo", + "sakurai", + "sango", + "shimoichi", + "shimokitayama", + "shinjo", + "soni", + "takatori", + "tawaramoto", + "tenkawa", + "tenri", + "uda", + "yamatokoriyama", + "yamatotakada", + "yamazoe", + "yoshino", + "aga", + "agano", + "gosen", + "itoigawa", + "izumozaki", + "joetsu", + "kamo", + "kariwa", + "kashiwazaki", + "minamiuonuma", + "mitsuke", + "muika", + "murakami", + "myoko", + "nagaoka", + "niigata", + "ojiya", + "omi", + "sado", + "sanjo", + "seiro", + "seirou", + "sekikawa", + "shibata", + "tagami", + "tainai", + "tochio", + "tokamachi", + "tsubame", + "tsunan", + "uonuma", + "yahiko", + "yoita", + "yuzawa", + "beppu", + "bungoono", + "bungotakada", + "hasama", + "hiji", + "himeshima", + "hita", + "kamitsue", + "kokonoe", + "kuju", + "kunisaki", + "kusu", + "oita", + "saiki", + "taketa", + "tsukumi", + "usa", + "usuki", + "yufu", + "akaiwa", + "asakuchi", + "bizen", + "hayashima", + "ibara", + "kagamino", + "kasaoka", + "kibichuo", + "kumenan", + "kurashiki", + "maniwa", + "misaki", + "nagi", + "niimi", + "nishiawakura", + "okayama", + "satosho", + "setouchi", + "shinjo", + "shoo", + "soja", + "takahashi", + "tamano", + "tsuyama", + "wake", + "yakage", + "aguni", + "ginowan", + "ginoza", + 
"gushikami", + "haebaru", + "higashi", + "hirara", + "iheya", + "ishigaki", + "ishikawa", + "itoman", + "izena", + "kadena", + "kin", + "kitadaito", + "kitanakagusuku", + "kumejima", + "kunigami", + "minamidaito", + "motobu", + "nago", + "naha", + "nakagusuku", + "nakijin", + "nanjo", + "nishihara", + "ogimi", + "okinawa", + "onna", + "shimoji", + "taketomi", + "tarama", + "tokashiki", + "tomigusuku", + "tonaki", + "urasoe", + "uruma", + "yaese", + "yomitan", + "yonabaru", + "yonaguni", + "zamami", + "abeno", + "chihayaakasaka", + "chuo", + "daito", + "fujiidera", + "habikino", + "hannan", + "higashiosaka", + "higashisumiyoshi", + "higashiyodogawa", + "hirakata", + "ibaraki", + "ikeda", + "izumi", + "izumiotsu", + "izumisano", + "kadoma", + "kaizuka", + "kanan", + "kashiwara", + "katano", + "kawachinagano", + "kishiwada", + "kita", + "kumatori", + "matsubara", + "minato", + "minoh", + "misaki", + "moriguchi", + "neyagawa", + "nishi", + "nose", + "osakasayama", + "sakai", + "sayama", + "sennan", + "settsu", + "shijonawate", + "shimamoto", + "suita", + "tadaoka", + "taishi", + "tajiri", + "takaishi", + "takatsuki", + "tondabayashi", + "toyonaka", + "toyono", + "yao", + "ariake", + "arita", + "fukudomi", + "genkai", + "hamatama", + "hizen", + "imari", + "kamimine", + "kanzaki", + "karatsu", + "kashima", + "kitagata", + "kitahata", + "kiyama", + "kouhoku", + "kyuragi", + "nishiarita", + "ogi", + "omachi", + "ouchi", + "saga", + "shiroishi", + "taku", + "tara", + "tosu", + "yoshinogari", + "arakawa", + "asaka", + "chichibu", + "fujimi", + "fujimino", + "fukaya", + "hanno", + "hanyu", + "hasuda", + "hatogaya", + "hatoyama", + "hidaka", + "higashichichibu", + "higashimatsuyama", + "honjo", + "ina", + "iruma", + "iwatsuki", + "kamiizumi", + "kamikawa", + "kamisato", + "kasukabe", + "kawagoe", + "kawaguchi", + "kawajima", + "kazo", + "kitamoto", + "koshigaya", + "kounosu", + "kuki", + "kumagaya", + "matsubushi", + "minano", + "misato", + "miyashiro", + "miyoshi", + "moroyama", + "nagatoro", + "namegawa", + "niiza", + "ogano", + "ogawa", + "ogose", + "okegawa", + "omiya", + "otaki", + "ranzan", + "ryokami", + "saitama", + "sakado", + "satte", + "sayama", + "shiki", + "shiraoka", + "soka", + "sugito", + "toda", + "tokigawa", + "tokorozawa", + "tsurugashima", + "urawa", + "warabi", + "yashio", + "yokoze", + "yono", + "yorii", + "yoshida", + "yoshikawa", + "yoshimi", + "city", + "city", + "aisho", + "gamo", + "higashiomi", + "hikone", + "koka", + "konan", + "kosei", + "koto", + "kusatsu", + "maibara", + "moriyama", + "nagahama", + "nishiazai", + "notogawa", + "omihachiman", + "otsu", + "ritto", + "ryuoh", + "takashima", + "takatsuki", + "torahime", + "toyosato", + "yasu", + "akagi", + "ama", + "gotsu", + "hamada", + "higashiizumo", + "hikawa", + "hikimi", + "izumo", + "kakinoki", + "masuda", + "matsue", + "misato", + "nishinoshima", + "ohda", + "okinoshima", + "okuizumo", + "shimane", + "tamayu", + "tsuwano", + "unnan", + "yakumo", + "yasugi", + "yatsuka", + "arai", + "atami", + "fuji", + "fujieda", + "fujikawa", + "fujinomiya", + "fukuroi", + "gotemba", + "haibara", + "hamamatsu", + "higashiizu", + "ito", + "iwata", + "izu", + "izunokuni", + "kakegawa", + "kannami", + "kawanehon", + "kawazu", + "kikugawa", + "kosai", + "makinohara", + "matsuzaki", + "minamiizu", + "mishima", + "morimachi", + "nishiizu", + "numazu", + "omaezaki", + "shimada", + "shimizu", + "shimoda", + "shizuoka", + "susono", + "yaizu", + "yoshida", + "ashikaga", + "bato", + "haga", + "ichikai", + "iwafune", + "kaminokawa", + 
"kanuma", + "karasuyama", + "kuroiso", + "mashiko", + "mibu", + "moka", + "motegi", + "nasu", + "nasushiobara", + "nikko", + "nishikata", + "nogi", + "ohira", + "ohtawara", + "oyama", + "sakura", + "sano", + "shimotsuke", + "shioya", + "takanezawa", + "tochigi", + "tsuga", + "ujiie", + "utsunomiya", + "yaita", + "aizumi", + "anan", + "ichiba", + "itano", + "kainan", + "komatsushima", + "matsushige", + "mima", + "minami", + "miyoshi", + "mugi", + "nakagawa", + "naruto", + "sanagochi", + "shishikui", + "tokushima", + "wajiki", + "adachi", + "akiruno", + "akishima", + "aogashima", + "arakawa", + "bunkyo", + "chiyoda", + "chofu", + "chuo", + "edogawa", + "fuchu", + "fussa", + "hachijo", + "hachioji", + "hamura", + "higashikurume", + "higashimurayama", + "higashiyamato", + "hino", + "hinode", + "hinohara", + "inagi", + "itabashi", + "katsushika", + "kita", + "kiyose", + "kodaira", + "koganei", + "kokubunji", + "komae", + "koto", + "kouzushima", + "kunitachi", + "machida", + "meguro", + "minato", + "mitaka", + "mizuho", + "musashimurayama", + "musashino", + "nakano", + "nerima", + "ogasawara", + "okutama", + "ome", + "oshima", + "ota", + "setagaya", + "shibuya", + "shinagawa", + "shinjuku", + "suginami", + "sumida", + "tachikawa", + "taito", + "tama", + "toshima", + "chizu", + "hino", + "kawahara", + "koge", + "kotoura", + "misasa", + "nanbu", + "nichinan", + "sakaiminato", + "tottori", + "wakasa", + "yazu", + "yonago", + "asahi", + "fuchu", + "fukumitsu", + "funahashi", + "himi", + "imizu", + "inami", + "johana", + "kamiichi", + "kurobe", + "nakaniikawa", + "namerikawa", + "nanto", + "nyuzen", + "oyabe", + "taira", + "takaoka", + "tateyama", + "toga", + "tonami", + "toyama", + "unazuki", + "uozu", + "yamada", + "arida", + "aridagawa", + "gobo", + "hashimoto", + "hidaka", + "hirogawa", + "inami", + "iwade", + "kainan", + "kamitonda", + "katsuragi", + "kimino", + "kinokawa", + "kitayama", + "koya", + "koza", + "kozagawa", + "kudoyama", + "kushimoto", + "mihama", + "misato", + "nachikatsuura", + "shingu", + "shirahama", + "taiji", + "tanabe", + "wakayama", + "yuasa", + "yura", + "asahi", + "funagata", + "higashine", + "iide", + "kahoku", + "kaminoyama", + "kaneyama", + "kawanishi", + "mamurogawa", + "mikawa", + "murayama", + "nagai", + "nakayama", + "nanyo", + "nishikawa", + "obanazawa", + "oe", + "oguni", + "ohkura", + "oishida", + "sagae", + "sakata", + "sakegawa", + "shinjo", + "shirataka", + "shonai", + "takahata", + "tendo", + "tozawa", + "tsuruoka", + "yamagata", + "yamanobe", + "yonezawa", + "yuza", + "abu", + "hagi", + "hikari", + "hofu", + "iwakuni", + "kudamatsu", + "mitou", + "nagato", + "oshima", + "shimonoseki", + "shunan", + "tabuse", + "tokuyama", + "toyota", + "ube", + "yuu", + "chuo", + "doshi", + "fuefuki", + "fujikawa", + "fujikawaguchiko", + "fujiyoshida", + "hayakawa", + "hokuto", + "ichikawamisato", + "kai", + "kofu", + "koshu", + "kosuge", + "minami-alps", + "minobu", + "nakamichi", + "nanbu", + "narusawa", + "nirasaki", + "nishikatsura", + "oshino", + "otsuki", + "showa", + "tabayama", + "tsuru", + "uenohara", + "yamanakako", + "yamanashi", + "city", + "ac", + "co", + "go", + "info", + "me", + "mobi", + "ne", + "or", + "sc", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "biz", + "com", + "edu", + "gov", + "info", + "net", + "org", + "ass", + "asso", + "com", + "coop", + "edu", + "gouv", + "gov", + "medecin", + "mil", + "nom", + "notaires", + "org", + "pharmaciens", + "prd", + "presse", + "tm", + "veterinaire", + "edu", + "gov", + "net", + "org", + 
"com", + "edu", + "gov", + "org", + "rep", + "tra", + "ac", + "blogspot", + "busan", + "chungbuk", + "chungnam", + "co", + "daegu", + "daejeon", + "es", + "gangwon", + "go", + "gwangju", + "gyeongbuk", + "gyeonggi", + "gyeongnam", + "hs", + "incheon", + "jeju", + "jeonbuk", + "jeonnam", + "kg", + "mil", + "ms", + "ne", + "or", + "pe", + "re", + "sc", + "seoul", + "ulsan", + "co", + "edu", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "net", + "nym", + "org", + "bnr", + "c", + "com", + "edu", + "gov", + "info", + "int", + "net", + "nym", + "org", + "per", + "static", + "dev", + "sites", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "edu", + "gov", + "net", + "org", + "oy", + "blogspot", + "nom", + "nym", + "cyon", + "mypep", + "ac", + "assn", + "com", + "edu", + "gov", + "grp", + "hotel", + "int", + "ltd", + "net", + "ngo", + "org", + "sch", + "soc", + "web", + "com", + "edu", + "gov", + "net", + "org", + "co", + "org", + "blogspot", + "gov", + "nym", + "blogspot", + "nym", + "asn", + "com", + "conf", + "edu", + "gov", + "id", + "mil", + "net", + "org", + "com", + "edu", + "gov", + "id", + "med", + "net", + "org", + "plc", + "sch", + "ac", + "co", + "gov", + "net", + "org", + "press", + "router", + "asso", + "tm", + "blogspot", + "ac", + "brasilia", + "c66", + "co", + "daplie", + "ddns", + "diskstation", + "dnsfor", + "dscloud", + "edu", + "filegear", + "gov", + "hopto", + "i234", + "its", + "loginto", + "myds", + "net", + "noip", + "nym", + "org", + "priv", + "synology", + "webhop", + "wedeploy", + "yombo", + "localhost", + "co", + "com", + "edu", + "gov", + "mil", + "nom", + "org", + "prd", + "tm", + "blogspot", + "com", + "edu", + "gov", + "inf", + "name", + "net", + "nom", + "org", + "com", + "edu", + "gouv", + "gov", + "net", + "org", + "presse", + "edu", + "gov", + "nyc", + "org", + "com", + "edu", + "gov", + "net", + "org", + "dscloud", + "blogspot", + "gov", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "net", + "org", + "blogspot", + "ac", + "co", + "com", + "gov", + "net", + "or", + "org", + "academy", + "agriculture", + "air", + "airguard", + "alabama", + "alaska", + "amber", + "ambulance", + "american", + "americana", + "americanantiques", + "americanart", + "amsterdam", + "and", + "annefrank", + "anthro", + "anthropology", + "antiques", + "aquarium", + "arboretum", + "archaeological", + "archaeology", + "architecture", + "art", + "artanddesign", + "artcenter", + "artdeco", + "arteducation", + "artgallery", + "arts", + "artsandcrafts", + "asmatart", + "assassination", + "assisi", + "association", + "astronomy", + "atlanta", + "austin", + "australia", + "automotive", + "aviation", + "axis", + "badajoz", + "baghdad", + "bahn", + "bale", + "baltimore", + "barcelona", + "baseball", + "basel", + "baths", + "bauern", + "beauxarts", + "beeldengeluid", + "bellevue", + "bergbau", + "berkeley", + "berlin", + "bern", + "bible", + "bilbao", + "bill", + "birdart", + "birthplace", + "bonn", + "boston", + "botanical", + "botanicalgarden", + "botanicgarden", + "botany", + "brandywinevalley", + "brasil", + "bristol", + "british", + "britishcolumbia", + "broadcast", + "brunel", + "brussel", + "brussels", + "bruxelles", + "building", + "burghof", + "bus", + "bushey", + "cadaques", + "california", + "cambridge", + "can", + "canada", + "capebreton", + "carrier", + "cartoonart", + "casadelamoneda", + "castle", + "castres", + "celtic", + "center", + "chattanooga", + "cheltenham", + "chesapeakebay", + "chicago", + "children", + 
"childrens", + "childrensgarden", + "chiropractic", + "chocolate", + "christiansburg", + "cincinnati", + "cinema", + "circus", + "civilisation", + "civilization", + "civilwar", + "clinton", + "clock", + "coal", + "coastaldefence", + "cody", + "coldwar", + "collection", + "colonialwilliamsburg", + "coloradoplateau", + "columbia", + "columbus", + "communication", + "communications", + "community", + "computer", + "computerhistory", + "contemporary", + "contemporaryart", + "convent", + "copenhagen", + "corporation", + "corvette", + "costume", + "countryestate", + "county", + "crafts", + "cranbrook", + "creation", + "cultural", + "culturalcenter", + "culture", + "cyber", + "cymru", + "dali", + "dallas", + "database", + "ddr", + "decorativearts", + "delaware", + "delmenhorst", + "denmark", + "depot", + "design", + "detroit", + "dinosaur", + "discovery", + "dolls", + "donostia", + "durham", + "eastafrica", + "eastcoast", + "education", + "educational", + "egyptian", + "eisenbahn", + "elburg", + "elvendrell", + "embroidery", + "encyclopedic", + "england", + "entomology", + "environment", + "environmentalconservation", + "epilepsy", + "essex", + "estate", + "ethnology", + "exeter", + "exhibition", + "family", + "farm", + "farmequipment", + "farmers", + "farmstead", + "field", + "figueres", + "filatelia", + "film", + "fineart", + "finearts", + "finland", + "flanders", + "florida", + "force", + "fortmissoula", + "fortworth", + "foundation", + "francaise", + "frankfurt", + "franziskaner", + "freemasonry", + "freiburg", + "fribourg", + "frog", + "fundacio", + "furniture", + "gallery", + "garden", + "gateway", + "geelvinck", + "gemological", + "geology", + "georgia", + "giessen", + "glas", + "glass", + "gorge", + "grandrapids", + "graz", + "guernsey", + "halloffame", + "hamburg", + "handson", + "harvestcelebration", + "hawaii", + "health", + "heimatunduhren", + "hellas", + "helsinki", + "hembygdsforbund", + "heritage", + "histoire", + "historical", + "historicalsociety", + "historichouses", + "historisch", + "historisches", + "history", + "historyofscience", + "horology", + "house", + "humanities", + "illustration", + "imageandsound", + "indian", + "indiana", + "indianapolis", + "indianmarket", + "intelligence", + "interactive", + "iraq", + "iron", + "isleofman", + "jamison", + "jefferson", + "jerusalem", + "jewelry", + "jewish", + "jewishart", + "jfk", + "journalism", + "judaica", + "judygarland", + "juedisches", + "juif", + "karate", + "karikatur", + "kids", + "koebenhavn", + "koeln", + "kunst", + "kunstsammlung", + "kunstunddesign", + "labor", + "labour", + "lajolla", + "lancashire", + "landes", + "lans", + "larsson", + "lewismiller", + "lincoln", + "linz", + "living", + "livinghistory", + "localhistory", + "london", + "losangeles", + "louvre", + "loyalist", + "lucerne", + "luxembourg", + "luzern", + "mad", + "madrid", + "mallorca", + "manchester", + "mansion", + "mansions", + "manx", + "marburg", + "maritime", + "maritimo", + "maryland", + "marylhurst", + "media", + "medical", + "medizinhistorisches", + "meeres", + "memorial", + "mesaverde", + "michigan", + "midatlantic", + "military", + "mill", + "miners", + "mining", + "minnesota", + "missile", + "missoula", + "modern", + "moma", + "money", + "monmouth", + "monticello", + "montreal", + "moscow", + "motorcycle", + "muenchen", + "muenster", + "mulhouse", + "muncie", + "museet", + "museumcenter", + "museumvereniging", + "music", + "national", + "nationalfirearms", + "nationalheritage", + "nativeamerican", + "naturalhistory", + 
"naturalhistorymuseum", + "naturalsciences", + "nature", + "naturhistorisches", + "natuurwetenschappen", + "naumburg", + "naval", + "nebraska", + "neues", + "newhampshire", + "newjersey", + "newmexico", + "newport", + "newspaper", + "newyork", + "niepce", + "norfolk", + "north", + "nrw", + "nuernberg", + "nuremberg", + "nyc", + "nyny", + "oceanographic", + "oceanographique", + "omaha", + "online", + "ontario", + "openair", + "oregon", + "oregontrail", + "otago", + "oxford", + "pacific", + "paderborn", + "palace", + "paleo", + "palmsprings", + "panama", + "paris", + "pasadena", + "pharmacy", + "philadelphia", + "philadelphiaarea", + "philately", + "phoenix", + "photography", + "pilots", + "pittsburgh", + "planetarium", + "plantation", + "plants", + "plaza", + "portal", + "portland", + "portlligat", + "posts-and-telecommunications", + "preservation", + "presidio", + "press", + "project", + "public", + "pubol", + "quebec", + "railroad", + "railway", + "research", + "resistance", + "riodejaneiro", + "rochester", + "rockart", + "roma", + "russia", + "saintlouis", + "salem", + "salvadordali", + "salzburg", + "sandiego", + "sanfrancisco", + "santabarbara", + "santacruz", + "santafe", + "saskatchewan", + "satx", + "savannahga", + "schlesisches", + "schoenbrunn", + "schokoladen", + "school", + "schweiz", + "science", + "science-fiction", + "scienceandhistory", + "scienceandindustry", + "sciencecenter", + "sciencecenters", + "sciencehistory", + "sciences", + "sciencesnaturelles", + "scotland", + "seaport", + "settlement", + "settlers", + "shell", + "sherbrooke", + "sibenik", + "silk", + "ski", + "skole", + "society", + "sologne", + "soundandvision", + "southcarolina", + "southwest", + "space", + "spy", + "square", + "stadt", + "stalbans", + "starnberg", + "state", + "stateofdelaware", + "station", + "steam", + "steiermark", + "stjohn", + "stockholm", + "stpetersburg", + "stuttgart", + "suisse", + "surgeonshall", + "surrey", + "svizzera", + "sweden", + "sydney", + "tank", + "tcm", + "technology", + "telekommunikation", + "television", + "texas", + "textile", + "theater", + "time", + "timekeeping", + "topology", + "torino", + "touch", + "town", + "transport", + "tree", + "trolley", + "trust", + "trustee", + "uhren", + "ulm", + "undersea", + "university", + "usa", + "usantiques", + "usarts", + "uscountryestate", + "usculture", + "usdecorativearts", + "usgarden", + "ushistory", + "ushuaia", + "uslivinghistory", + "utah", + "uvic", + "valley", + "vantaa", + "versailles", + "viking", + "village", + "virginia", + "virtual", + "virtuel", + "vlaanderen", + "volkenkunde", + "wales", + "wallonie", + "war", + "washingtondc", + "watch-and-clock", + "watchandclock", + "western", + "westfalen", + "whaling", + "wildlife", + "williamsburg", + "windmill", + "workshop", + "xn--9dbhblg6di", + "xn--comunicaes-v6a2o", + "xn--correios-e-telecomunicaes-ghc29a", + "xn--h1aegh", + "xn--lns-qla", + "york", + "yorkshire", + "yosemite", + "youth", + "zoological", + "zoology", + "aero", + "biz", + "com", + "coop", + "edu", + "gov", + "info", + "int", + "mil", + "museum", + "name", + "net", + "org", + "pro", + "ac", + "biz", + "co", + "com", + "coop", + "edu", + "gov", + "int", + "museum", + "net", + "org", + "blogspot", + "com", + "edu", + "gob", + "net", + "nym", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "ac", + "adv", + "co", + "edu", + "gov", + "mil", + "net", + "org", + "ca", + "cc", + "co", + "com", + "dr", + "in", + "info", + "mobi", + "mx", + "name", + "or", + "org", + "pro", 
+ "school", + "tv", + "us", + "ws", + "her", + "his", + "forgot", + "forgot", + "asso", + "nom", + "alwaysdata", + "at-band-camp", + "azure-mobile", + "azurewebsites", + "barsy", + "blogdns", + "boomla", + "bounceme", + "bplaced", + "broke-it", + "buyshouses", + "casacam", + "cdn77", + "cdn77-ssl", + "channelsdvr", + "cloudaccess", + "cloudapp", + "cloudfront", + "cloudfunctions", + "cryptonomic", + "ddns", + "debian", + "definima", + "dnsalias", + "dnsdojo", + "does-it", + "dontexist", + "dsmynas", + "dynalias", + "dynathome", + "dynu", + "dynv6", + "eating-organic", + "endofinternet", + "familyds", + "fastly", + "fastlylb", + "feste-ip", + "firewall-gateway", + "flynnhosting", + "from-az", + "from-co", + "from-la", + "from-ny", + "gb", + "gets-it", + "ham-radio-op", + "homeftp", + "homeip", + "homelinux", + "homeunix", + "hu", + "in", + "in-the-band", + "ipifony", + "is-a-chef", + "is-a-geek", + "isa-geek", + "jp", + "kicks-ass", + "knx-server", + "moonscale", + "mydissent", + "myeffect", + "myfritz", + "mymediapc", + "mypsx", + "mysecuritycamera", + "nhlfan", + "no-ip", + "office-on-the", + "pgafan", + "podzone", + "privatizehealthinsurance", + "rackmaze", + "redirectme", + "ru", + "scrapper-site", + "se", + "selfip", + "sells-it", + "servebbs", + "serveblog", + "serveftp", + "serveminecraft", + "square7", + "static-access", + "sytes", + "t3l3p0rt", + "thruhere", + "twmail", + "uk", + "webhop", + "za", + "r", + "freetls", + "map", + "prod", + "ssl", + "a", + "global", + "a", + "b", + "global", + "map", + "alces", + "arts", + "com", + "firm", + "info", + "net", + "other", + "per", + "rec", + "store", + "web", + "com", + "edu", + "gov", + "i", + "mil", + "mobi", + "name", + "net", + "org", + "sch", + "blogspot", + "ac", + "biz", + "co", + "com", + "edu", + "gob", + "in", + "info", + "int", + "mil", + "net", + "nom", + "org", + "web", + "blogspot", + "bv", + "cistron", + "co", + "demon", + "hosting-cluster", + "transurl", + "virtueeldomein", + "aa", + "aarborte", + "aejrie", + "afjord", + "agdenes", + "ah", + "akershus", + "aknoluokta", + "akrehamn", + "al", + "alaheadju", + "alesund", + "algard", + "alstahaug", + "alta", + "alvdal", + "amli", + "amot", + "andasuolo", + "andebu", + "andoy", + "ardal", + "aremark", + "arendal", + "arna", + "aseral", + "asker", + "askim", + "askoy", + "askvoll", + "asnes", + "audnedaln", + "aukra", + "aure", + "aurland", + "aurskog-holand", + "austevoll", + "austrheim", + "averoy", + "badaddja", + "bahcavuotna", + "bahccavuotna", + "baidar", + "bajddar", + "balat", + "balestrand", + "ballangen", + "balsfjord", + "bamble", + "bardu", + "barum", + "batsfjord", + "bearalvahki", + "beardu", + "beiarn", + "berg", + "bergen", + "berlevag", + "bievat", + "bindal", + "birkenes", + "bjarkoy", + "bjerkreim", + "bjugn", + "blogspot", + "bodo", + "bokn", + "bomlo", + "bremanger", + "bronnoy", + "bronnoysund", + "brumunddal", + "bryne", + "bu", + "budejju", + "buskerud", + "bygland", + "bykle", + "cahcesuolo", + "co", + "davvenjarga", + "davvesiida", + "deatnu", + "dep", + "dielddanuorri", + "divtasvuodna", + "divttasvuotna", + "donna", + "dovre", + "drammen", + "drangedal", + "drobak", + "dyroy", + "egersund", + "eid", + "eidfjord", + "eidsberg", + "eidskog", + "eidsvoll", + "eigersund", + "elverum", + "enebakk", + "engerdal", + "etne", + "etnedal", + "evenassi", + "evenes", + "evje-og-hornnes", + "farsund", + "fauske", + "fedje", + "fet", + "fetsund", + "fhs", + "finnoy", + "fitjar", + "fjaler", + "fjell", + "fla", + "flakstad", + "flatanger", + "flekkefjord", + 
"flesberg", + "flora", + "floro", + "fm", + "folkebibl", + "folldal", + "forde", + "forsand", + "fosnes", + "frana", + "fredrikstad", + "frei", + "frogn", + "froland", + "frosta", + "froya", + "fuoisku", + "fuossko", + "fusa", + "fylkesbibl", + "fyresdal", + "gaivuotna", + "galsa", + "gamvik", + "gangaviika", + "gaular", + "gausdal", + "giehtavuoatna", + "gildeskal", + "giske", + "gjemnes", + "gjerdrum", + "gjerstad", + "gjesdal", + "gjovik", + "gloppen", + "gol", + "gran", + "grane", + "granvin", + "gratangen", + "grimstad", + "grong", + "grue", + "gulen", + "guovdageaidnu", + "ha", + "habmer", + "hadsel", + "hagebostad", + "halden", + "halsa", + "hamar", + "hamaroy", + "hammarfeasta", + "hammerfest", + "hapmir", + "haram", + "hareid", + "harstad", + "hasvik", + "hattfjelldal", + "haugesund", + "hedmark", + "hemne", + "hemnes", + "hemsedal", + "herad", + "hitra", + "hjartdal", + "hjelmeland", + "hl", + "hm", + "hobol", + "hof", + "hokksund", + "hol", + "hole", + "holmestrand", + "holtalen", + "honefoss", + "hordaland", + "hornindal", + "horten", + "hoyanger", + "hoylandet", + "hurdal", + "hurum", + "hvaler", + "hyllestad", + "ibestad", + "idrett", + "inderoy", + "iveland", + "ivgu", + "jan-mayen", + "jessheim", + "jevnaker", + "jolster", + "jondal", + "jorpeland", + "kafjord", + "karasjohka", + "karasjok", + "karlsoy", + "karmoy", + "kautokeino", + "kirkenes", + "klabu", + "klepp", + "kommune", + "kongsberg", + "kongsvinger", + "kopervik", + "kraanghke", + "kragero", + "kristiansand", + "kristiansund", + "krodsherad", + "krokstadelva", + "kvafjord", + "kvalsund", + "kvam", + "kvanangen", + "kvinesdal", + "kvinnherad", + "kviteseid", + "kvitsoy", + "laakesvuemie", + "lahppi", + "langevag", + "lardal", + "larvik", + "lavagis", + "lavangen", + "leangaviika", + "lebesby", + "leikanger", + "leirfjord", + "leirvik", + "leka", + "leksvik", + "lenvik", + "lerdal", + "lesja", + "levanger", + "lier", + "lierne", + "lillehammer", + "lillesand", + "lindas", + "lindesnes", + "loabat", + "lodingen", + "lom", + "loppa", + "lorenskog", + "loten", + "lund", + "lunner", + "luroy", + "luster", + "lyngdal", + "lyngen", + "malatvuopmi", + "malselv", + "malvik", + "mandal", + "marker", + "marnardal", + "masfjorden", + "masoy", + "matta-varjjat", + "meland", + "meldal", + "melhus", + "meloy", + "meraker", + "midsund", + "midtre-gauldal", + "mil", + "mjondalen", + "mo-i-rana", + "moareke", + "modalen", + "modum", + "molde", + "more-og-romsdal", + "mosjoen", + "moskenes", + "moss", + "mosvik", + "mr", + "muosat", + "museum", + "naamesjevuemie", + "namdalseid", + "namsos", + "namsskogan", + "nannestad", + "naroy", + "narviika", + "narvik", + "naustdal", + "navuotna", + "nedre-eiker", + "nesna", + "nesodden", + "nesoddtangen", + "nesseby", + "nesset", + "nissedal", + "nittedal", + "nl", + "nord-aurdal", + "nord-fron", + "nord-odal", + "norddal", + "nordkapp", + "nordland", + "nordre-land", + "nordreisa", + "nore-og-uvdal", + "notodden", + "notteroy", + "nt", + "odda", + "of", + "oksnes", + "ol", + "omasvuotna", + "oppdal", + "oppegard", + "orkanger", + "orkdal", + "orland", + "orskog", + "orsta", + "osen", + "oslo", + "osoyro", + "osteroy", + "ostfold", + "ostre-toten", + "overhalla", + "ovre-eiker", + "oyer", + "oygarden", + "oystre-slidre", + "porsanger", + "porsangu", + "porsgrunn", + "priv", + "rade", + "radoy", + "rahkkeravju", + "raholt", + "raisa", + "rakkestad", + "ralingen", + "rana", + "randaberg", + "rauma", + "rendalen", + "rennebu", + "rennesoy", + "rindal", + "ringebu", + "ringerike", + "ringsaker", + 
"risor", + "rissa", + "rl", + "roan", + "rodoy", + "rollag", + "romsa", + "romskog", + "roros", + "rost", + "royken", + "royrvik", + "ruovat", + "rygge", + "salangen", + "salat", + "saltdal", + "samnanger", + "sandefjord", + "sandnes", + "sandnessjoen", + "sandoy", + "sarpsborg", + "sauda", + "sauherad", + "sel", + "selbu", + "selje", + "seljord", + "sf", + "siellak", + "sigdal", + "siljan", + "sirdal", + "skanit", + "skanland", + "skaun", + "skedsmo", + "skedsmokorset", + "ski", + "skien", + "skierva", + "skiptvet", + "skjak", + "skjervoy", + "skodje", + "slattum", + "smola", + "snaase", + "snasa", + "snillfjord", + "snoasa", + "sogndal", + "sogne", + "sokndal", + "sola", + "solund", + "somna", + "sondre-land", + "songdalen", + "sor-aurdal", + "sor-fron", + "sor-odal", + "sor-varanger", + "sorfold", + "sorreisa", + "sortland", + "sorum", + "spjelkavik", + "spydeberg", + "st", + "stange", + "stat", + "stathelle", + "stavanger", + "stavern", + "steigen", + "steinkjer", + "stjordal", + "stjordalshalsen", + "stokke", + "stor-elvdal", + "stord", + "stordal", + "storfjord", + "strand", + "stranda", + "stryn", + "sula", + "suldal", + "sund", + "sunndal", + "surnadal", + "svalbard", + "sveio", + "svelvik", + "sykkylven", + "tana", + "tananger", + "telemark", + "time", + "tingvoll", + "tinn", + "tjeldsund", + "tjome", + "tm", + "tokke", + "tolga", + "tonsberg", + "torsken", + "tr", + "trana", + "tranby", + "tranoy", + "troandin", + "trogstad", + "tromsa", + "tromso", + "trondheim", + "trysil", + "tvedestrand", + "tydal", + "tynset", + "tysfjord", + "tysnes", + "tysvar", + "ullensaker", + "ullensvang", + "ulvik", + "unjarga", + "utsira", + "va", + "vaapste", + "vadso", + "vaga", + "vagan", + "vagsoy", + "vaksdal", + "valle", + "vang", + "vanylven", + "vardo", + "varggat", + "varoy", + "vefsn", + "vega", + "vegarshei", + "vennesla", + "verdal", + "verran", + "vestby", + "vestfold", + "vestnes", + "vestre-slidre", + "vestre-toten", + "vestvagoy", + "vevelstad", + "vf", + "vgs", + "vik", + "vikna", + "vindafjord", + "voagat", + "volda", + "voss", + "vossevangen", + "xn--andy-ira", + "xn--asky-ira", + "xn--aurskog-hland-jnb", + "xn--avery-yua", + "xn--bdddj-mrabd", + "xn--bearalvhki-y4a", + "xn--berlevg-jxa", + "xn--bhcavuotna-s4a", + "xn--bhccavuotna-k7a", + "xn--bidr-5nac", + "xn--bievt-0qa", + "xn--bjarky-fya", + "xn--bjddar-pta", + "xn--blt-elab", + "xn--bmlo-gra", + "xn--bod-2na", + "xn--brnny-wuac", + "xn--brnnysund-m8ac", + "xn--brum-voa", + "xn--btsfjord-9za", + "xn--davvenjrga-y4a", + "xn--dnna-gra", + "xn--drbak-wua", + "xn--dyry-ira", + "xn--eveni-0qa01ga", + "xn--finny-yua", + "xn--fjord-lra", + "xn--fl-zia", + "xn--flor-jra", + "xn--frde-gra", + "xn--frna-woa", + "xn--frya-hra", + "xn--ggaviika-8ya47h", + "xn--gildeskl-g0a", + "xn--givuotna-8ya", + "xn--gjvik-wua", + "xn--gls-elac", + "xn--h-2fa", + "xn--hbmer-xqa", + "xn--hcesuolo-7ya35b", + "xn--hgebostad-g3a", + "xn--hmmrfeasta-s4ac", + "xn--hnefoss-q1a", + "xn--hobl-ira", + "xn--holtlen-hxa", + "xn--hpmir-xqa", + "xn--hyanger-q1a", + "xn--hylandet-54a", + "xn--indery-fya", + "xn--jlster-bya", + "xn--jrpeland-54a", + "xn--karmy-yua", + "xn--kfjord-iua", + "xn--klbu-woa", + "xn--koluokta-7ya57h", + "xn--krager-gya", + "xn--kranghke-b0a", + "xn--krdsherad-m8a", + "xn--krehamn-dxa", + "xn--krjohka-hwab49j", + "xn--ksnes-uua", + "xn--kvfjord-nxa", + "xn--kvitsy-fya", + "xn--kvnangen-k0a", + "xn--l-1fa", + "xn--laheadju-7ya", + "xn--langevg-jxa", + "xn--ldingen-q1a", + "xn--leagaviika-52b", + "xn--lesund-hua", + "xn--lgrd-poac", + 
"xn--lhppi-xqa", + "xn--linds-pra", + "xn--loabt-0qa", + "xn--lrdal-sra", + "xn--lrenskog-54a", + "xn--lt-liac", + "xn--lten-gra", + "xn--lury-ira", + "xn--mely-ira", + "xn--merker-kua", + "xn--mjndalen-64a", + "xn--mlatvuopmi-s4a", + "xn--mli-tla", + "xn--mlselv-iua", + "xn--moreke-jua", + "xn--mosjen-eya", + "xn--mot-tla", + "xn--mre-og-romsdal-qqb", + "xn--msy-ula0h", + "xn--mtta-vrjjat-k7af", + "xn--muost-0qa", + "xn--nmesjevuemie-tcba", + "xn--nry-yla5g", + "xn--nttery-byae", + "xn--nvuotna-hwa", + "xn--oppegrd-ixa", + "xn--ostery-fya", + "xn--osyro-wua", + "xn--porsgu-sta26f", + "xn--rady-ira", + "xn--rdal-poa", + "xn--rde-ula", + "xn--rdy-0nab", + "xn--rennesy-v1a", + "xn--rhkkervju-01af", + "xn--rholt-mra", + "xn--risa-5na", + "xn--risr-ira", + "xn--rland-uua", + "xn--rlingen-mxa", + "xn--rmskog-bya", + "xn--rros-gra", + "xn--rskog-uua", + "xn--rst-0na", + "xn--rsta-fra", + "xn--ryken-vua", + "xn--ryrvik-bya", + "xn--s-1fa", + "xn--sandnessjen-ogb", + "xn--sandy-yua", + "xn--seral-lra", + "xn--sgne-gra", + "xn--skierv-uta", + "xn--skjervy-v1a", + "xn--skjk-soa", + "xn--sknit-yqa", + "xn--sknland-fxa", + "xn--slat-5na", + "xn--slt-elab", + "xn--smla-hra", + "xn--smna-gra", + "xn--snase-nra", + "xn--sndre-land-0cb", + "xn--snes-poa", + "xn--snsa-roa", + "xn--sr-aurdal-l8a", + "xn--sr-fron-q1a", + "xn--sr-odal-q1a", + "xn--sr-varanger-ggb", + "xn--srfold-bya", + "xn--srreisa-q1a", + "xn--srum-gra", + "xn--stfold-9xa", + "xn--stjrdal-s1a", + "xn--stjrdalshalsen-sqb", + "xn--stre-toten-zcb", + "xn--tjme-hra", + "xn--tnsberg-q1a", + "xn--trany-yua", + "xn--trgstad-r1a", + "xn--trna-woa", + "xn--troms-zua", + "xn--tysvr-vra", + "xn--unjrga-rta", + "xn--vads-jra", + "xn--vard-jra", + "xn--vegrshei-c0a", + "xn--vestvgy-ixa6o", + "xn--vg-yiab", + "xn--vgan-qoa", + "xn--vgsy-qoa0j", + "xn--vre-eiker-k8a", + "xn--vrggt-xqad", + "xn--vry-yla5g", + "xn--yer-zna", + "xn--ygarden-p1a", + "xn--ystre-slidre-ujb", + "gs", + "gs", + "nes", + "gs", + "nes", + "gs", + "os", + "valer", + "xn--vler-qoa", + "gs", + "gs", + "os", + "gs", + "heroy", + "sande", + "gs", + "gs", + "bo", + "heroy", + "xn--b-5ga", + "xn--hery-ira", + "gs", + "gs", + "gs", + "gs", + "valer", + "gs", + "gs", + "gs", + "gs", + "bo", + "xn--b-5ga", + "gs", + "gs", + "gs", + "sande", + "gs", + "sande", + "xn--hery-ira", + "xn--vler-qoa", + "biz", + "com", + "edu", + "gov", + "info", + "net", + "org", + "merseine", + "mine", + "nom", + "shacknet", + "ac", + "co", + "cri", + "geek", + "gen", + "govt", + "health", + "iwi", + "kiwi", + "maori", + "mil", + "net", + "nym", + "org", + "parliament", + "school", + "xn--mori-qsa", + "blogspot", + "co", + "com", + "edu", + "gov", + "med", + "museum", + "net", + "org", + "pro", + "homelink", + "barsy", + "accesscam", + "ae", + "amune", + "blogdns", + "blogsite", + "bmoattachments", + "boldlygoingnowhere", + "cable-modem", + "camdvr", + "cdn77", + "cdn77-secure", + "certmgr", + "cloudns", + "collegefan", + "couchpotatofries", + "ddnss", + "diskstation", + "dnsalias", + "dnsdojo", + "doesntexist", + "dontexist", + "doomdns", + "dsmynas", + "duckdns", + "dvrdns", + "dynalias", + "dyndns", + "endofinternet", + "endoftheinternet", + "eu", + "familyds", + "fedorainfracloud", + "fedorapeople", + "fedoraproject", + "freeddns", + "from-me", + "game-host", + "gotdns", + "hepforge", + "hk", + "hobby-site", + "homedns", + "homeftp", + "homelinux", + "homeunix", + "hopto", + "is-a-bruinsfan", + "is-a-candidate", + "is-a-celticsfan", + "is-a-chef", + "is-a-geek", + "is-a-knight", + "is-a-linux-user", + 
"is-a-patsfan", + "is-a-soxfan", + "is-found", + "is-lost", + "is-saved", + "is-very-bad", + "is-very-evil", + "is-very-good", + "is-very-nice", + "is-very-sweet", + "isa-geek", + "js", + "kicks-ass", + "misconfused", + "mlbfan", + "my-firewall", + "myfirewall", + "myftp", + "mysecuritycamera", + "mywire", + "nflfan", + "no-ip", + "pimienta", + "podzone", + "poivron", + "potager", + "read-books", + "readmyblog", + "selfip", + "sellsyourhome", + "servebbs", + "serveftp", + "servegame", + "spdns", + "stuff-4-sale", + "sweetpepper", + "tunk", + "tuxfamily", + "twmail", + "ufcfan", + "us", + "webhop", + "webredirect", + "wmflabs", + "za", + "zapto", + "tele", + "c", + "rsc", + "origin", + "ssl", + "go", + "home", + "al", + "asso", + "at", + "au", + "be", + "bg", + "ca", + "cd", + "ch", + "cn", + "cy", + "cz", + "de", + "dk", + "edu", + "ee", + "es", + "fi", + "fr", + "gr", + "hr", + "hu", + "ie", + "il", + "in", + "int", + "is", + "it", + "jp", + "kr", + "lt", + "lu", + "lv", + "mc", + "me", + "mk", + "mt", + "my", + "net", + "ng", + "nl", + "no", + "nz", + "paris", + "pl", + "pt", + "q-a", + "ro", + "ru", + "se", + "si", + "sk", + "tr", + "uk", + "us", + "cloud", + "os", + "stg", + "app", + "os", + "app", + "nerdpol", + "abo", + "ac", + "com", + "edu", + "gob", + "ing", + "med", + "net", + "nom", + "org", + "sld", + "ybo", + "blogspot", + "com", + "edu", + "gob", + "mil", + "net", + "nom", + "nym", + "org", + "com", + "edu", + "org", + "com", + "edu", + "gov", + "i", + "mil", + "net", + "ngo", + "org", + "1337", + "biz", + "com", + "edu", + "fam", + "gob", + "gok", + "gon", + "gop", + "gos", + "gov", + "info", + "net", + "org", + "web", + "agro", + "aid", + "art", + "atm", + "augustow", + "auto", + "babia-gora", + "bedzin", + "beep", + "beskidy", + "bialowieza", + "bialystok", + "bielawa", + "bieszczady", + "biz", + "boleslawiec", + "bydgoszcz", + "bytom", + "cieszyn", + "co", + "com", + "czeladz", + "czest", + "dlugoleka", + "edu", + "elblag", + "elk", + "gda", + "gdansk", + "gdynia", + "gliwice", + "glogow", + "gmina", + "gniezno", + "gorlice", + "gov", + "grajewo", + "gsm", + "ilawa", + "info", + "jaworzno", + "jelenia-gora", + "jgora", + "kalisz", + "karpacz", + "kartuzy", + "kaszuby", + "katowice", + "kazimierz-dolny", + "kepno", + "ketrzyn", + "klodzko", + "kobierzyce", + "kolobrzeg", + "konin", + "konskowola", + "krakow", + "kutno", + "lapy", + "lebork", + "legnica", + "lezajsk", + "limanowa", + "lomza", + "lowicz", + "lubin", + "lukow", + "mail", + "malbork", + "malopolska", + "mazowsze", + "mazury", + "med", + "media", + "miasta", + "mielec", + "mielno", + "mil", + "mragowo", + "naklo", + "net", + "nieruchomosci", + "nom", + "nowaruda", + "nysa", + "olawa", + "olecko", + "olkusz", + "olsztyn", + "opoczno", + "opole", + "org", + "ostroda", + "ostroleka", + "ostrowiec", + "ostrowwlkp", + "pc", + "pila", + "pisz", + "podhale", + "podlasie", + "polkowice", + "pomorskie", + "pomorze", + "powiat", + "poznan", + "priv", + "prochowice", + "pruszkow", + "przeworsk", + "pulawy", + "radom", + "rawa-maz", + "realestate", + "rel", + "rybnik", + "rzeszow", + "sanok", + "sejny", + "sex", + "shop", + "sklep", + "skoczow", + "slask", + "slupsk", + "sopot", + "sos", + "sosnowiec", + "stalowa-wola", + "starachowice", + "stargard", + "suwalki", + "swidnica", + "swiebodzin", + "swinoujscie", + "szczecin", + "szczytno", + "szkola", + "targi", + "tarnobrzeg", + "tgory", + "tm", + "tourism", + "travel", + "turek", + "turystyka", + "tychy", + "ustka", + "walbrzych", + "warmia", + "warszawa", + "waw", + 
"wegrow", + "wielun", + "wlocl", + "wloclawek", + "wodzislaw", + "wolomin", + "wroc", + "wroclaw", + "zachpomor", + "zagan", + "zakopane", + "zarow", + "zgora", + "zgorzelec", + "ap", + "griw", + "ic", + "is", + "kmpsp", + "konsulat", + "kppsp", + "kwp", + "kwpsp", + "mup", + "mw", + "oirm", + "oum", + "pa", + "pinb", + "piw", + "po", + "psp", + "psse", + "pup", + "rzgw", + "sa", + "sdn", + "sko", + "so", + "sr", + "starostwo", + "ug", + "ugim", + "um", + "umig", + "upow", + "uppo", + "us", + "uw", + "uzs", + "wif", + "wiih", + "winb", + "wios", + "witd", + "wiw", + "wsa", + "wskr", + "wuoz", + "wzmiuw", + "zp", + "co", + "edu", + "gov", + "net", + "org", + "ac", + "biz", + "com", + "edu", + "est", + "gov", + "info", + "isla", + "name", + "net", + "org", + "pro", + "prof", + "aaa", + "aca", + "acct", + "avocat", + "bar", + "cloudns", + "cpa", + "eng", + "jur", + "law", + "med", + "recht", + "com", + "edu", + "gov", + "net", + "org", + "plo", + "sec", + "blogspot", + "com", + "edu", + "gov", + "int", + "net", + "nome", + "nym", + "org", + "publ", + "belau", + "cloudns", + "co", + "ed", + "go", + "ne", + "nom", + "or", + "com", + "coop", + "edu", + "gov", + "mil", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "nom", + "org", + "sch", + "asso", + "blogspot", + "com", + "nom", + "ybo", + "clan", + "arts", + "blogspot", + "com", + "firm", + "info", + "nom", + "nt", + "org", + "rec", + "shop", + "store", + "tm", + "www", + "lima-city", + "myddns", + "webspace", + "ac", + "blogspot", + "co", + "edu", + "gov", + "in", + "nom", + "org", + "ac", + "adygeya", + "bashkiria", + "bir", + "blogspot", + "cbg", + "cldmail", + "com", + "dagestan", + "edu", + "gov", + "grozny", + "int", + "kalmykia", + "kustanai", + "marine", + "mil", + "mordovia", + "msk", + "mytis", + "nalchik", + "net", + "nov", + "org", + "pp", + "pyatigorsk", + "spb", + "test", + "vladikavkaz", + "vladimir", + "hb", + "ac", + "co", + "com", + "edu", + "gouv", + "gov", + "int", + "mil", + "net", + "com", + "edu", + "gov", + "med", + "net", + "org", + "pub", + "sch", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "net", + "org", + "ybo", + "com", + "edu", + "gov", + "info", + "med", + "net", + "org", + "tv", + "a", + "ac", + "b", + "bd", + "blogspot", + "brand", + "c", + "com", + "d", + "e", + "f", + "fh", + "fhsk", + "fhv", + "g", + "h", + "i", + "k", + "komforb", + "kommunalforbund", + "komvux", + "l", + "lanbib", + "m", + "n", + "naturbruksgymn", + "o", + "org", + "p", + "parti", + "pp", + "press", + "r", + "s", + "t", + "tm", + "u", + "w", + "x", + "y", + "z", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "per", + "com", + "gov", + "hashbang", + "mil", + "net", + "now", + "org", + "platform", + "wedeploy", + "blogspot", + "nom", + "byen", + "cyon", + "platformsh", + "blogspot", + "nym", + "com", + "edu", + "gov", + "net", + "org", + "art", + "blogspot", + "com", + "edu", + "gouv", + "org", + "perso", + "univ", + "com", + "net", + "org", + "stackspace", + "uber", + "xs4all", + "co", + "com", + "consulado", + "edu", + "embaixada", + "gov", + "mil", + "net", + "org", + "principe", + "saotome", + "store", + "abkhazia", + "adygeya", + "aktyubinsk", + "arkhangelsk", + "armenia", + "ashgabad", + "azerbaijan", + "balashov", + "bashkiria", + "bryansk", + "bukhara", + "chimkent", + "dagestan", + "east-kazakhstan", + "exnet", + "georgia", + "grozny", + "ivanovo", + "jambyl", + "kalmykia", + "kaluga", + "karacol", + "karaganda", + "karelia", + "khakassia", + 
"krasnodar", + "kurgan", + "kustanai", + "lenug", + "mangyshlak", + "mordovia", + "msk", + "murmansk", + "nalchik", + "navoi", + "north-kazakhstan", + "nov", + "nym", + "obninsk", + "penza", + "pokrovsk", + "sochi", + "spb", + "tashkent", + "termez", + "togliatti", + "troitsk", + "tselinograd", + "tula", + "tuva", + "vladikavkaz", + "vladimir", + "vologda", + "barsy", + "com", + "edu", + "gob", + "org", + "red", + "gov", + "nym", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "knightpoint", + "ac", + "co", + "org", + "blogspot", + "ac", + "co", + "go", + "in", + "mi", + "net", + "or", + "ac", + "biz", + "co", + "com", + "edu", + "go", + "gov", + "int", + "mil", + "name", + "net", + "nic", + "org", + "test", + "web", + "gov", + "co", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "agrinet", + "com", + "defense", + "edunet", + "ens", + "fin", + "gov", + "ind", + "info", + "intl", + "mincom", + "nat", + "net", + "org", + "perso", + "rnrt", + "rns", + "rnu", + "tourism", + "turen", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "vpnplus", + "av", + "bbs", + "bel", + "biz", + "com", + "dr", + "edu", + "gen", + "gov", + "info", + "k12", + "kep", + "mil", + "name", + "nc", + "net", + "org", + "pol", + "tel", + "tv", + "web", + "blogspot", + "gov", + "ybo", + "aero", + "biz", + "co", + "com", + "coop", + "edu", + "gov", + "info", + "int", + "jobs", + "mobi", + "museum", + "name", + "net", + "org", + "pro", + "travel", + "better-than", + "dyndns", + "on-the-web", + "worse-than", + "blogspot", + "club", + "com", + "ebiz", + "edu", + "game", + "gov", + "idv", + "mil", + "net", + "nym", + "org", + "url", + "xn--czrw28b", + "xn--uc0atv", + "xn--zf0ao64a", + "mymailer", + "ac", + "co", + "go", + "hotel", + "info", + "me", + "mil", + "mobi", + "ne", + "or", + "sc", + "tv", + "biz", + "cc", + "cherkassy", + "cherkasy", + "chernigov", + "chernihiv", + "chernivtsi", + "chernovtsy", + "ck", + "cn", + "co", + "com", + "cr", + "crimea", + "cv", + "dn", + "dnepropetrovsk", + "dnipropetrovsk", + "dominic", + "donetsk", + "dp", + "edu", + "gov", + "if", + "in", + "inf", + "ivano-frankivsk", + "kh", + "kharkiv", + "kharkov", + "kherson", + "khmelnitskiy", + "khmelnytskyi", + "kiev", + "kirovograd", + "km", + "kr", + "krym", + "ks", + "kv", + "kyiv", + "lg", + "lt", + "ltd", + "lugansk", + "lutsk", + "lv", + "lviv", + "mk", + "mykolaiv", + "net", + "nikolaev", + "od", + "odesa", + "odessa", + "org", + "pl", + "poltava", + "pp", + "rivne", + "rovno", + "rv", + "sb", + "sebastopol", + "sevastopol", + "sm", + "sumy", + "te", + "ternopil", + "uz", + "uzhgorod", + "vinnica", + "vinnytsia", + "vn", + "volyn", + "yalta", + "zaporizhzhe", + "zaporizhzhia", + "zhitomir", + "zhytomyr", + "zp", + "zt", + "ac", + "blogspot", + "co", + "com", + "go", + "ne", + "nom", + "or", + "org", + "sc", + "ac", + "co", + "gov", + "ltd", + "me", + "net", + "nhs", + "org", + "plc", + "police", + "sch", + "blogspot", + "nh-serv", + "no-ip", + "wellbeingzone", + "homeoffice", + "service", + "ak", + "al", + "ar", + "as", + "az", + "ca", + "cloudns", + "co", + "ct", + "dc", + "de", + "dni", + "drud", + "fed", + "fl", + "ga", + "golffan", + "gu", + "hi", + "ia", + "id", + "il", + "in", + "is-by", + "isa", + "kids", + "ks", + "ky", + "la", + "land-4-sale", + "ma", + "md", + "me", + "mi", + "mn", + "mo", + "ms", + "mt", + "nc", + "nd", + "ne", + "nh", + "nj", + "nm", + "noip", + "nsn", + "nv", + "ny", + "oh", + "ok", + "or", + "pa", + "pointto", + "pr", + "ri", + "sc", + "sd", + "stuff-4-sale", + "tn", + "tx", + 
"ut", + "va", + "vi", + "vt", + "wa", + "wi", + "wv", + "wy", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "chtr", + "paroch", + "pvt", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "ann-arbor", + "cc", + "cog", + "dst", + "eaton", + "gen", + "k12", + "lib", + "mus", + "tec", + "washtenaw", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "cc", + "k12", + "lib", + "com", + "edu", + "gub", + "mil", + "net", + "nom", + "org", + "blogspot", + "co", + "com", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "arts", + "co", + "com", + "e12", + "edu", + "firm", + "gob", + "gov", + "info", + "int", + "mil", + "net", + "org", + "rec", + "store", + "tec", + "web", + "nom", + "co", + "com", + "k12", + "net", + "org", + "ac", + "biz", + "blogspot", + "com", + "edu", + "gov", + "health", + "info", + "int", + "name", + "net", + "org", + "pro", + "com", + "edu", + "net", + "org", + "advisor", + "com", + "dyndns", + "edu", + "gov", + "mypets", + "net", + "org", + "xn--80au", + "xn--90azh", + "xn--c1avg", + "xn--d1at", + "xn--o1ac", + "xn--o1ach", + "xn--12c1fe0br", + "xn--12cfi8ixb8l", + "xn--12co0c3b4eva", + "xn--h3cuzk1di", + "xn--m3ch0j3a", + "xn--o3cyx2a", + "blogsite", + "fhapp", + "ac", + "agric", + "alt", + "co", + "edu", + "gov", + "grondar", + "law", + "mil", + "net", + "ngo", + "nis", + "nom", + "org", + "school", + "tm", + "web", + "blogspot", + "ac", + "biz", + "co", + "com", + "edu", + "gov", + "info", + "mil", + "net", + "org", + "sch", + "lima", + "triton", + "ac", + "co", + "gov", + "mil", + "org", +} diff --git a/vendor/golang.org/x/net/route/address.go b/vendor/golang.org/x/net/route/address.go new file mode 100644 index 000000000..e6bfa39e9 --- /dev/null +++ b/vendor/golang.org/x/net/route/address.go @@ -0,0 +1,425 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import "runtime" + +// An Addr represents an address associated with packet routing. +type Addr interface { + // Family returns an address family. + Family() int +} + +// A LinkAddr represents a link-layer address. 
+type LinkAddr struct { + Index int // interface index when attached + Name string // interface name when attached + Addr []byte // link-layer address when attached +} + +// Family implements the Family method of Addr interface. +func (a *LinkAddr) Family() int { return sysAF_LINK } + +func (a *LinkAddr) lenAndSpace() (int, int) { + l := 8 + len(a.Name) + len(a.Addr) + return l, roundup(l) +} + +func (a *LinkAddr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + nlen, alen := len(a.Name), len(a.Addr) + if nlen > 255 || alen > 255 { + return 0, errInvalidAddr + } + b[0] = byte(l) + b[1] = sysAF_LINK + if a.Index > 0 { + nativeEndian.PutUint16(b[2:4], uint16(a.Index)) + } + data := b[8:] + if nlen > 0 { + b[5] = byte(nlen) + copy(data[:nlen], a.Addr) + data = data[nlen:] + } + if alen > 0 { + b[6] = byte(alen) + copy(data[:alen], a.Name) + data = data[alen:] + } + return ll, nil +} + +func parseLinkAddr(b []byte) (Addr, error) { + if len(b) < 8 { + return nil, errInvalidAddr + } + _, a, err := parseKernelLinkAddr(sysAF_LINK, b[4:]) + if err != nil { + return nil, err + } + a.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4])) + return a, nil +} + +// parseKernelLinkAddr parses b as a link-layer address in +// conventional BSD kernel form. +func parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) { + // The encoding looks like the following: + // +----------------------------+ + // | Type (1 octet) | + // +----------------------------+ + // | Name length (1 octet) | + // +----------------------------+ + // | Address length (1 octet) | + // +----------------------------+ + // | Selector length (1 octet) | + // +----------------------------+ + // | Data (variable) | + // +----------------------------+ + // + // On some platforms, all-bit-one of length field means "don't + // care". + nlen, alen, slen := int(b[1]), int(b[2]), int(b[3]) + if nlen == 0xff { + nlen = 0 + } + if alen == 0xff { + alen = 0 + } + if slen == 0xff { + slen = 0 + } + l := 4 + nlen + alen + slen + if len(b) < l { + return 0, nil, errInvalidAddr + } + data := b[4:] + var name string + var addr []byte + if nlen > 0 { + name = string(data[:nlen]) + data = data[nlen:] + } + if alen > 0 { + addr = data[:alen] + data = data[alen:] + } + return l, &LinkAddr{Name: name, Addr: addr}, nil +} + +// An Inet4Addr represents an internet address for IPv4. +type Inet4Addr struct { + IP [4]byte // IP address +} + +// Family implements the Family method of Addr interface. +func (a *Inet4Addr) Family() int { return sysAF_INET } + +func (a *Inet4Addr) lenAndSpace() (int, int) { + return sizeofSockaddrInet, roundup(sizeofSockaddrInet) +} + +func (a *Inet4Addr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + b[0] = byte(l) + b[1] = sysAF_INET + copy(b[4:8], a.IP[:]) + return ll, nil +} + +// An Inet6Addr represents an internet address for IPv6. +type Inet6Addr struct { + IP [16]byte // IP address + ZoneID int // zone identifier +} + +// Family implements the Family method of Addr interface. 
+func (a *Inet6Addr) Family() int { return sysAF_INET6 } + +func (a *Inet6Addr) lenAndSpace() (int, int) { + return sizeofSockaddrInet6, roundup(sizeofSockaddrInet6) +} + +func (a *Inet6Addr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + b[0] = byte(l) + b[1] = sysAF_INET6 + copy(b[8:24], a.IP[:]) + if a.ZoneID > 0 { + nativeEndian.PutUint32(b[24:28], uint32(a.ZoneID)) + } + return ll, nil +} + +// parseInetAddr parses b as an internet address for IPv4 or IPv6. +func parseInetAddr(af int, b []byte) (Addr, error) { + switch af { + case sysAF_INET: + if len(b) < sizeofSockaddrInet { + return nil, errInvalidAddr + } + a := &Inet4Addr{} + copy(a.IP[:], b[4:8]) + return a, nil + case sysAF_INET6: + if len(b) < sizeofSockaddrInet6 { + return nil, errInvalidAddr + } + a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))} + copy(a.IP[:], b[8:24]) + if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) { + // KAME based IPv6 protocol stack usually + // embeds the interface index in the + // interface-local or link-local address as + // the kernel-internal form. + id := int(bigEndian.Uint16(a.IP[2:4])) + if id != 0 { + a.ZoneID = id + a.IP[2], a.IP[3] = 0, 0 + } + } + return a, nil + default: + return nil, errInvalidAddr + } +} + +// parseKernelInetAddr parses b as an internet address in conventional +// BSD kernel form. +func parseKernelInetAddr(af int, b []byte) (int, Addr, error) { + // The encoding looks similar to the NLRI encoding. + // +----------------------------+ + // | Length (1 octet) | + // +----------------------------+ + // | Address prefix (variable) | + // +----------------------------+ + // + // The differences between the kernel form and the NLRI + // encoding are: + // + // - The length field of the kernel form indicates the prefix + // length in bytes, not in bits + // + // - In the kernel form, zero value of the length field + // doesn't mean 0.0.0.0/0 or ::/0 + // + // - The kernel form appends leading bytes to the prefix field + // to make the tuple to be conformed with + // the routing message boundary + l := int(b[0]) + if runtime.GOOS == "darwin" { + // On Darwn, an address in the kernel form is also + // used as a message filler. + if l == 0 || len(b) > roundup(l) { + l = roundup(l) + } + } else { + l = roundup(l) + } + if len(b) < l { + return 0, nil, errInvalidAddr + } + // Don't reorder case expressions. + // The case expressions for IPv6 must come first. + const ( + off4 = 4 // offset of in_addr + off6 = 8 // offset of in6_addr + ) + switch { + case b[0] == sizeofSockaddrInet6: + a := &Inet6Addr{} + copy(a.IP[:], b[off6:off6+16]) + return int(b[0]), a, nil + case af == sysAF_INET6: + a := &Inet6Addr{} + if l-1 < off6 { + copy(a.IP[:], b[1:l]) + } else { + copy(a.IP[:], b[l-off6:l]) + } + return int(b[0]), a, nil + case b[0] == sizeofSockaddrInet: + a := &Inet4Addr{} + copy(a.IP[:], b[off4:off4+4]) + return int(b[0]), a, nil + default: // an old fashion, AF_UNSPEC or unknown means AF_INET + a := &Inet4Addr{} + if l-1 < off4 { + copy(a.IP[:], b[1:l]) + } else { + copy(a.IP[:], b[l-off4:l]) + } + return int(b[0]), a, nil + } +} + +// A DefaultAddr represents an address of various operating +// system-specific features. +type DefaultAddr struct { + af int + Raw []byte // raw format of address +} + +// Family implements the Family method of Addr interface. 
+func (a *DefaultAddr) Family() int { return a.af } + +func (a *DefaultAddr) lenAndSpace() (int, int) { + l := len(a.Raw) + return l, roundup(l) +} + +func (a *DefaultAddr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + if l > 255 { + return 0, errInvalidAddr + } + b[1] = byte(l) + copy(b[:l], a.Raw) + return ll, nil +} + +func parseDefaultAddr(b []byte) (Addr, error) { + if len(b) < 2 || len(b) < int(b[0]) { + return nil, errInvalidAddr + } + a := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]} + return a, nil +} + +func addrsSpace(as []Addr) int { + var l int + for _, a := range as { + switch a := a.(type) { + case *LinkAddr: + _, ll := a.lenAndSpace() + l += ll + case *Inet4Addr: + _, ll := a.lenAndSpace() + l += ll + case *Inet6Addr: + _, ll := a.lenAndSpace() + l += ll + case *DefaultAddr: + _, ll := a.lenAndSpace() + l += ll + } + } + return l +} + +// marshalAddrs marshals as and returns a bitmap indicating which +// address is stored in b. +func marshalAddrs(b []byte, as []Addr) (uint, error) { + var attrs uint + for i, a := range as { + switch a := a.(type) { + case *LinkAddr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *Inet4Addr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *Inet6Addr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *DefaultAddr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + } + } + return attrs, nil +} + +func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) { + var as [sysRTAX_MAX]Addr + af := int(sysAF_UNSPEC) + for i := uint(0); i < sysRTAX_MAX && len(b) >= roundup(0); i++ { + if attrs&(1<> 8) +} + +func (binaryLittleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (binaryLittleEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) +} + +func (binaryLittleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +type binaryBigEndian struct{} + +func (binaryBigEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[1]) | uint16(b[0])<<8 +} + +func (binaryBigEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 8) + b[1] = byte(v) +} + +func (binaryBigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (binaryBigEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) +} + +func (binaryBigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | 
uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} diff --git a/vendor/golang.org/x/net/route/defs_darwin.go b/vendor/golang.org/x/net/route/defs_darwin.go new file mode 100644 index 000000000..e7716442d --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_darwin.go @@ -0,0 +1,114 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_STAT = C.NET_RT_STAT + sysNET_RT_TRASH = C.NET_RT_TRASH + sysNET_RT_IFLIST2 = C.NET_RT_IFLIST2 + sysNET_RT_DUMP2 = C.NET_RT_DUMP2 + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFINFO2 = C.RTM_IFINFO2 + sysRTM_NEWMADDR2 = C.RTM_NEWMADDR2 + sysRTM_GET2 = C.RTM_GET2 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrDarwin15 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrDarwin15 = C.sizeof_struct_ifa_msghdr + sizeofIfmaMsghdrDarwin15 = C.sizeof_struct_ifma_msghdr + sizeofIfMsghdr2Darwin15 = C.sizeof_struct_if_msghdr2 + sizeofIfmaMsghdr2Darwin15 = C.sizeof_struct_ifma_msghdr2 + sizeofIfDataDarwin15 = C.sizeof_struct_if_data + sizeofIfData64Darwin15 = C.sizeof_struct_if_data64 + + sizeofRtMsghdrDarwin15 = C.sizeof_struct_rt_msghdr + sizeofRtMsghdr2Darwin15 = C.sizeof_struct_rt_msghdr2 + sizeofRtMetricsDarwin15 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/defs_dragonfly.go b/vendor/golang.org/x/net/route/defs_dragonfly.go new file mode 100644 index 000000000..dd31de269 --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_dragonfly.go @@ -0,0 +1,113 @@ 
+// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_P1003_1B = C.CTL_P1003_1B + sysCTL_LWKT = C.CTL_LWKT + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_MPLS1 = C.RTA_MPLS1 + sysRTA_MPLS2 = C.RTA_MPLS2 + sysRTA_MPLS3 = C.RTA_MPLS3 + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MPLS1 = C.RTAX_MPLS1 + sysRTAX_MPLS2 = C.RTAX_MPLS2 + sysRTAX_MPLS3 = C.RTAX_MPLS3 + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrDragonFlyBSD4 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifa_msghdr + sizeofIfmaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrDragonFlyBSD4 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrDragonFlyBSD4 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsDragonFlyBSD4 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/defs_freebsd.go b/vendor/golang.org/x/net/route/defs_freebsd.go new file mode 100644 index 000000000..d95594d8e --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_freebsd.go @@ -0,0 +1,337 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include + +struct if_data_freebsd7 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd8 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd9 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd10 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_vhid; + u_char ifi_baudrate_pf; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + uint64_t ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd11 { + uint8_t ifi_type; + uint8_t ifi_physical; + uint8_t ifi_addrlen; + uint8_t ifi_hdrlen; + uint8_t ifi_link_state; + uint8_t ifi_vhid; + uint16_t ifi_datalen; + uint32_t ifi_mtu; + uint32_t ifi_metric; + uint64_t ifi_baudrate; + uint64_t ifi_ipackets; + uint64_t ifi_ierrors; + uint64_t ifi_opackets; + uint64_t ifi_oerrors; + uint64_t ifi_collisions; + uint64_t ifi_ibytes; + uint64_t ifi_obytes; + uint64_t ifi_imcasts; + uint64_t ifi_omcasts; + uint64_t ifi_iqdrops; + uint64_t ifi_oqdrops; + uint64_t ifi_noproto; + uint64_t ifi_hwassist; + union { + time_t tt; + uint64_t ph; + } __ifi_epoch; + union { + struct timeval tv; + struct { + uint64_t ph1; + uint64_t ph2; + } ph; + } __ifi_lastchange; +}; + +struct if_msghdr_freebsd7 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd7 ifm_data; +}; + +struct if_msghdr_freebsd8 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; 
+ int ifm_flags; + u_short ifm_index; + struct if_data_freebsd8 ifm_data; +}; + +struct if_msghdr_freebsd9 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd9 ifm_data; +}; + +struct if_msghdr_freebsd10 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd10 ifm_data; +}; + +struct if_msghdr_freebsd11 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd11 ifm_data; +}; +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_IFMALIST = C.NET_RT_IFMALIST + sysNET_RT_IFLISTL = C.NET_RT_IFLISTL +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_P1003_1B = C.CTL_P1003_1B +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrlFreeBSD10 = C.sizeof_struct_if_msghdrl + sizeofIfaMsghdrFreeBSD10 = C.sizeof_struct_ifa_msghdr + sizeofIfaMsghdrlFreeBSD10 = C.sizeof_struct_ifa_msghdrl + sizeofIfmaMsghdrFreeBSD10 = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrFreeBSD10 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrFreeBSD10 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsFreeBSD10 = C.sizeof_struct_rt_metrics + + sizeofIfMsghdrFreeBSD7 = C.sizeof_struct_if_msghdr_freebsd7 + sizeofIfMsghdrFreeBSD8 = C.sizeof_struct_if_msghdr_freebsd8 + sizeofIfMsghdrFreeBSD9 = C.sizeof_struct_if_msghdr_freebsd9 + sizeofIfMsghdrFreeBSD10 = C.sizeof_struct_if_msghdr_freebsd10 + sizeofIfMsghdrFreeBSD11 = C.sizeof_struct_if_msghdr_freebsd11 + + sizeofIfDataFreeBSD7 = C.sizeof_struct_if_data_freebsd7 + sizeofIfDataFreeBSD8 = C.sizeof_struct_if_data_freebsd8 + sizeofIfDataFreeBSD9 = C.sizeof_struct_if_data_freebsd9 + sizeofIfDataFreeBSD10 = C.sizeof_struct_if_data_freebsd10 + sizeofIfDataFreeBSD11 = C.sizeof_struct_if_data_freebsd11 + + sizeofIfMsghdrlFreeBSD10Emu = C.sizeof_struct_if_msghdrl + sizeofIfaMsghdrFreeBSD10Emu = 
C.sizeof_struct_ifa_msghdr + sizeofIfaMsghdrlFreeBSD10Emu = C.sizeof_struct_ifa_msghdrl + sizeofIfmaMsghdrFreeBSD10Emu = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrFreeBSD10Emu = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrFreeBSD10Emu = C.sizeof_struct_rt_msghdr + sizeofRtMetricsFreeBSD10Emu = C.sizeof_struct_rt_metrics + + sizeofIfMsghdrFreeBSD7Emu = C.sizeof_struct_if_msghdr_freebsd7 + sizeofIfMsghdrFreeBSD8Emu = C.sizeof_struct_if_msghdr_freebsd8 + sizeofIfMsghdrFreeBSD9Emu = C.sizeof_struct_if_msghdr_freebsd9 + sizeofIfMsghdrFreeBSD10Emu = C.sizeof_struct_if_msghdr_freebsd10 + sizeofIfMsghdrFreeBSD11Emu = C.sizeof_struct_if_msghdr_freebsd11 + + sizeofIfDataFreeBSD7Emu = C.sizeof_struct_if_data_freebsd7 + sizeofIfDataFreeBSD8Emu = C.sizeof_struct_if_data_freebsd8 + sizeofIfDataFreeBSD9Emu = C.sizeof_struct_if_data_freebsd9 + sizeofIfDataFreeBSD10Emu = C.sizeof_struct_if_data_freebsd10 + sizeofIfDataFreeBSD11Emu = C.sizeof_struct_if_data_freebsd11 + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/defs_netbsd.go b/vendor/golang.org/x/net/route/defs_netbsd.go new file mode 100644 index 000000000..b0abd549a --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_netbsd.go @@ -0,0 +1,112 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_DDB = C.CTL_DDB + sysCTL_PROC = C.CTL_PROC + sysCTL_VENDOR = C.CTL_VENDOR + sysCTL_EMUL = C.CTL_EMUL + sysCTL_SECURITY = C.CTL_SECURITY + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + sysRTM_SETGATE = C.RTM_SETGATE + sysRTM_LLINFO_UPD = C.RTM_LLINFO_UPD + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_CHGADDR = C.RTM_CHGADDR + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_TAG = C.RTA_TAG + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + 
sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_TAG = C.RTAX_TAG + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrNetBSD7 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrNetBSD7 = C.sizeof_struct_ifa_msghdr + sizeofIfAnnouncemsghdrNetBSD7 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrNetBSD7 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsNetBSD7 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/defs_openbsd.go b/vendor/golang.org/x/net/route/defs_openbsd.go new file mode 100644 index 000000000..173bb5d51 --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_openbsd.go @@ -0,0 +1,116 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_STATS = C.NET_RT_STATS + sysNET_RT_TABLE = C.NET_RT_TABLE + sysNET_RT_IFNAMES = C.NET_RT_IFNAMES + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_FS = C.CTL_FS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_DDB = C.CTL_DDB + sysCTL_VFS = C.CTL_VFS + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_DESYNC = C.RTM_DESYNC + sysRTM_INVALIDATE = C.RTM_INVALIDATE + sysRTM_BFD = C.RTM_BFD + sysRTM_PROPOSAL = C.RTM_PROPOSAL + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_SRC = C.RTA_SRC + sysRTA_SRCMASK = C.RTA_SRCMASK + sysRTA_LABEL = C.RTA_LABEL + sysRTA_BFD = C.RTA_BFD + sysRTA_DNS = C.RTA_DNS + sysRTA_STATIC = C.RTA_STATIC + sysRTA_SEARCH = C.RTA_SEARCH + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_SRC = C.RTAX_SRC + sysRTAX_SRCMASK = C.RTAX_SRCMASK + sysRTAX_LABEL = C.RTAX_LABEL + sysRTAX_BFD = C.RTAX_BFD + sysRTAX_DNS = C.RTAX_DNS + sysRTAX_STATIC = C.RTAX_STATIC + sysRTAX_SEARCH = C.RTAX_SEARCH + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofRtMsghdr = C.sizeof_struct_rt_msghdr + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + 
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/interface.go b/vendor/golang.org/x/net/route/interface.go new file mode 100644 index 000000000..854906d9c --- /dev/null +++ b/vendor/golang.org/x/net/route/interface.go @@ -0,0 +1,64 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +// An InterfaceMessage represents an interface message. +type InterfaceMessage struct { + Version int // message version + Type int // message type + Flags int // interface flags + Index int // interface index + Name string // interface name + Addrs []Addr // addresses + + extOff int // offset of header extension + raw []byte // raw message +} + +// An InterfaceAddrMessage represents an interface address message. +type InterfaceAddrMessage struct { + Version int // message version + Type int // message type + Flags int // interface flags + Index int // interface index + Addrs []Addr // addresses + + raw []byte // raw message +} + +// Sys implements the Sys method of Message interface. +func (m *InterfaceAddrMessage) Sys() []Sys { return nil } + +// An InterfaceMulticastAddrMessage represents an interface multicast +// address message. +type InterfaceMulticastAddrMessage struct { + Version int // message version + Type int // messsage type + Flags int // interface flags + Index int // interface index + Addrs []Addr // addresses + + raw []byte // raw message +} + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMulticastAddrMessage) Sys() []Sys { return nil } + +// An InterfaceAnnounceMessage represents an interface announcement +// message. +type InterfaceAnnounceMessage struct { + Version int // message version + Type int // message type + Index int // interface index + Name string // interface name + What int // what type of announcement + + raw []byte // raw message +} + +// Sys implements the Sys method of Message interface. +func (m *InterfaceAnnounceMessage) Sys() []Sys { return nil } diff --git a/vendor/golang.org/x/net/route/interface_announce.go b/vendor/golang.org/x/net/route/interface_announce.go new file mode 100644 index 000000000..520d657b5 --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_announce.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd netbsd + +package route + +func (w *wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAnnounceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Index: int(nativeEndian.Uint16(b[4:6])), + What: int(nativeEndian.Uint16(b[22:24])), + raw: b[:l], + } + for i := 0; i < 16; i++ { + if b[6+i] != 0 { + continue + } + m.Name = string(b[6 : 6+i]) + break + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_classic.go b/vendor/golang.org/x/net/route/interface_classic.go new file mode 100644 index 000000000..ac4e7a680 --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_classic.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly netbsd + +package route + +import "runtime" + +func (w *wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[4:8])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Addrs: make([]Addr, sysRTAX_MAX), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + extOff: w.extOff, + raw: b[:l], + } + a, err := parseLinkAddr(b[w.bodyOff:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (w *wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + raw: b[:l], + } + if runtime.GOOS == "netbsd" { + m.Index = int(nativeEndian.Uint16(b[16:18])) + } else { + m.Index = int(nativeEndian.Uint16(b[12:14])) + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_freebsd.go b/vendor/golang.org/x/net/route/interface_freebsd.go new file mode 100644 index 000000000..9f6f50c00 --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_freebsd.go @@ -0,0 +1,78 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package route + +func (w *wireFormat) parseInterfaceMessage(typ RIBType, b []byte) (Message, error) { + var extOff, bodyOff int + if typ == sysNET_RT_IFLISTL { + if len(b) < 20 { + return nil, errMessageTooShort + } + extOff = int(nativeEndian.Uint16(b[18:20])) + bodyOff = int(nativeEndian.Uint16(b[16:18])) + } else { + extOff = w.extOff + bodyOff = w.bodyOff + } + if len(b) < extOff || len(b) < bodyOff { + return nil, errInvalidMessage + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[4:8])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + Addrs: make([]Addr, sysRTAX_MAX), + extOff: extOff, + raw: b[:l], + } + a, err := parseLinkAddr(b[bodyOff:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (w *wireFormat) parseInterfaceAddrMessage(typ RIBType, b []byte) (Message, error) { + var bodyOff int + if typ == sysNET_RT_IFLISTL { + if len(b) < 24 { + return nil, errMessageTooShort + } + bodyOff = int(nativeEndian.Uint16(b[16:18])) + } else { + bodyOff = w.bodyOff + } + if len(b) < bodyOff { + return nil, errInvalidMessage + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_multicast.go b/vendor/golang.org/x/net/route/interface_multicast.go new file mode 100644 index 000000000..1e99a9cc6 --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_multicast.go @@ -0,0 +1,30 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd + +package route + +func (w *wireFormat) parseInterfaceMulticastAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceMulticastAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_openbsd.go b/vendor/golang.org/x/net/route/interface_openbsd.go new file mode 100644 index 000000000..e4a143c1c --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_openbsd.go @@ -0,0 +1,90 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package route + +func (*wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 32 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[12:16])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[16:20])), + Index: int(nativeEndian.Uint16(b[6:8])), + Addrs: make([]Addr, sysRTAX_MAX), + raw: b[:l], + } + ll := int(nativeEndian.Uint16(b[4:6])) + if len(b) < ll { + return nil, errInvalidMessage + } + a, err := parseLinkAddr(b[ll:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (*wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 24 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + bodyOff := int(nativeEndian.Uint16(b[4:6])) + if len(b) < bodyOff { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[12:16])), + Index: int(nativeEndian.Uint16(b[6:8])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} + +func (*wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < 26 { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAnnounceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Index: int(nativeEndian.Uint16(b[6:8])), + What: int(nativeEndian.Uint16(b[8:10])), + raw: b[:l], + } + for i := 0; i < 16; i++ { + if b[10+i] != 0 { + continue + } + m.Name = string(b[10 : 10+i]) + break + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/message.go b/vendor/golang.org/x/net/route/message.go new file mode 100644 index 000000000..0fa7e09f4 --- /dev/null +++ b/vendor/golang.org/x/net/route/message.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +// A Message represents a routing message. +type Message interface { + // Sys returns operating system-specific information. + Sys() []Sys +} + +// A Sys reprensents operating system-specific information. +type Sys interface { + // SysType returns a type of operating system-specific + // information. + SysType() SysType +} + +// A SysType represents a type of operating system-specific +// information. +type SysType int + +const ( + SysMetrics SysType = iota + SysStats +) + +// ParseRIB parses b as a routing information base and returns a list +// of routing messages. 
+func ParseRIB(typ RIBType, b []byte) ([]Message, error) { + if !typ.parseable() { + return nil, errUnsupportedMessage + } + var msgs []Message + nmsgs, nskips := 0, 0 + for len(b) > 4 { + nmsgs++ + l := int(nativeEndian.Uint16(b[:2])) + if l == 0 { + return nil, errInvalidMessage + } + if len(b) < l { + return nil, errMessageTooShort + } + if b[2] != sysRTM_VERSION { + b = b[l:] + continue + } + if w, ok := wireFormats[int(b[3])]; !ok { + nskips++ + } else { + m, err := w.parse(typ, b) + if err != nil { + return nil, err + } + if m == nil { + nskips++ + } else { + msgs = append(msgs, m) + } + } + b = b[l:] + } + // We failed to parse any of the messages - version mismatch? + if nmsgs != len(msgs)+nskips { + return nil, errMessageMismatch + } + return msgs, nil +} diff --git a/vendor/golang.org/x/net/route/message_darwin_test.go b/vendor/golang.org/x/net/route/message_darwin_test.go new file mode 100644 index 000000000..316aa7507 --- /dev/null +++ b/vendor/golang.org/x/net/route/message_darwin_test.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "testing" + +func TestFetchAndParseRIBOnDarwin(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_FLAGS, sysNET_RT_DUMP2, sysNET_RT_IFLIST2} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } +} diff --git a/vendor/golang.org/x/net/route/message_freebsd_test.go b/vendor/golang.org/x/net/route/message_freebsd_test.go new file mode 100644 index 000000000..db4b56752 --- /dev/null +++ b/vendor/golang.org/x/net/route/message_freebsd_test.go @@ -0,0 +1,92 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import ( + "testing" + "unsafe" +) + +func TestFetchAndParseRIBOnFreeBSD(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_IFMALIST} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } +} + +func TestFetchAndParseRIBOnFreeBSD10AndAbove(t *testing.T) { + if _, err := FetchRIB(sysAF_UNSPEC, sysNET_RT_IFLISTL, 0); err != nil { + t.Skip("NET_RT_IFLISTL not supported") + } + var p uintptr + if kernelAlign != int(unsafe.Sizeof(p)) { + t.Skip("NET_RT_IFLIST vs. 
NET_RT_IFLISTL doesn't work for 386 emulation on amd64") + } + + var tests = [2]struct { + typ RIBType + b []byte + msgs []Message + ss []string + }{ + {typ: sysNET_RT_IFLIST}, + {typ: sysNET_RT_IFLISTL}, + } + for i := range tests { + var lastErr error + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, tests[i].typ) + if err != nil { + lastErr = err + continue + } + tests[i].msgs = append(tests[i].msgs, rs...) + } + if len(tests[i].msgs) == 0 && lastErr != nil { + t.Error(tests[i].typ, lastErr) + continue + } + tests[i].ss, lastErr = msgs(tests[i].msgs).validate() + if lastErr != nil { + t.Error(tests[i].typ, lastErr) + continue + } + for _, s := range tests[i].ss { + t.Log(s) + } + } + for i := len(tests) - 1; i > 0; i-- { + if len(tests[i].ss) != len(tests[i-1].ss) { + t.Errorf("got %v; want %v", tests[i].ss, tests[i-1].ss) + continue + } + for j, s1 := range tests[i].ss { + s0 := tests[i-1].ss[j] + if s1 != s0 { + t.Errorf("got %s; want %s", s1, s0) + } + } + } +} diff --git a/vendor/golang.org/x/net/route/message_test.go b/vendor/golang.org/x/net/route/message_test.go new file mode 100644 index 000000000..e848dabf4 --- /dev/null +++ b/vendor/golang.org/x/net/route/message_test.go @@ -0,0 +1,239 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "os" + "syscall" + "testing" + "time" +) + +func TestFetchAndParseRIB(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_DUMP, sysNET_RT_IFLIST} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(typ, s) + } + } +} + +var ( + rtmonSock int + rtmonErr error +) + +func init() { + // We need to keep rtmonSock alive to avoid treading on + // recycled socket descriptors. + rtmonSock, rtmonErr = syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) +} + +// TestMonitorAndParseRIB leaks a worker goroutine and a socket +// descriptor but that's intentional. +func TestMonitorAndParseRIB(t *testing.T) { + if testing.Short() || os.Getuid() != 0 { + t.Skip("must be root") + } + + if rtmonErr != nil { + t.Fatal(rtmonErr) + } + + // We suppose that using an IPv4 link-local address and the + // dot1Q ID for Token Ring and FDDI doesn't harm anyone. + pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} + if err := pv.configure(1002); err != nil { + t.Skip(err) + } + if err := pv.setup(); err != nil { + t.Skip(err) + } + pv.teardown() + + go func() { + b := make([]byte, os.Getpagesize()) + for { + // There's no easy way to unblock this read + // call because the routing message exchange + // over routing socket is a connectionless + // message-oriented protocol, no control plane + // for signaling connectivity, and we cannot + // use the net package of standard library due + // to the lack of support for routing socket + // and circular dependency. 
+ n, err := syscall.Read(rtmonSock, b) + if err != nil { + return + } + ms, err := ParseRIB(0, b[:n]) + if err != nil { + t.Error(err) + return + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(err) + return + } + for _, s := range ss { + t.Log(s) + } + } + }() + + for _, vid := range []int{1002, 1003, 1004, 1005} { + pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} + if err := pv.configure(vid); err != nil { + t.Fatal(err) + } + if err := pv.setup(); err != nil { + t.Fatal(err) + } + time.Sleep(200 * time.Millisecond) + if err := pv.teardown(); err != nil { + t.Fatal(err) + } + time.Sleep(200 * time.Millisecond) + } +} + +func TestParseRIBWithFuzz(t *testing.T) { + for _, fuzz := range []string{ + "0\x00\x05\x050000000000000000" + + "00000000000000000000" + + "00000000000000000000" + + "00000000000000000000" + + "0000000000000\x02000000" + + "00000000", + "\x02\x00\x05\f0000000000000000" + + "0\x0200000000000000", + "\x02\x00\x05\x100000000000000\x1200" + + "0\x00\xff\x00", + "\x02\x00\x05\f0000000000000000" + + "0\x12000\x00\x02\x0000", + "\x00\x00\x00\x01\x00", + "00000", + } { + for typ := RIBType(0); typ < 256; typ++ { + ParseRIB(typ, []byte(fuzz)) + } + } +} + +func TestRouteMessage(t *testing.T) { + s, err := syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) + if err != nil { + t.Fatal(err) + } + defer syscall.Close(s) + + var ms []RouteMessage + for _, af := range []int{sysAF_INET, sysAF_INET6} { + if _, err := fetchAndParseRIB(af, sysNET_RT_DUMP); err != nil { + t.Log(err) + continue + } + switch af { + case sysAF_INET: + ms = append(ms, []RouteMessage{ + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, + nil, + nil, + nil, + &LinkAddr{}, + &Inet4Addr{}, + nil, + &Inet4Addr{}, + }, + }, + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, + }, + }, + }...) + case sysAF_INET6: + ms = append(ms, []RouteMessage{ + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + nil, + nil, + nil, + &LinkAddr{}, + &Inet6Addr{}, + nil, + &Inet6Addr{}, + }, + }, + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + }, + }, + }...) + } + } + for i, m := range ms { + m.ID = uintptr(os.Getpid()) + m.Seq = i + 1 + wb, err := m.Marshal() + if err != nil { + t.Fatalf("%v: %v", m, err) + } + if _, err := syscall.Write(s, wb); err != nil { + t.Fatalf("%v: %v", m, err) + } + rb := make([]byte, os.Getpagesize()) + n, err := syscall.Read(s, rb) + if err != nil { + t.Fatalf("%v: %v", m, err) + } + rms, err := ParseRIB(0, rb[:n]) + if err != nil { + t.Fatalf("%v: %v", m, err) + } + for _, rm := range rms { + err := rm.(*RouteMessage).Err + if err != nil { + t.Errorf("%v: %v", m, err) + } + } + ss, err := msgs(rms).validate() + if err != nil { + t.Fatalf("%v: %v", m, err) + } + for _, s := range ss { + t.Log(s) + } + } +} diff --git a/vendor/golang.org/x/net/route/route.go b/vendor/golang.org/x/net/route/route.go new file mode 100644 index 000000000..081da0d5c --- /dev/null +++ b/vendor/golang.org/x/net/route/route.go @@ -0,0 +1,123 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +// Package route provides basic functions for the manipulation of +// packet routing facilities on BSD variants. 
+// +// The package supports any version of Darwin, any version of +// DragonFly BSD, FreeBSD 7 through 11, NetBSD 6 and above, and +// OpenBSD 5.6 and above. +package route + +import ( + "errors" + "os" + "syscall" +) + +var ( + errUnsupportedMessage = errors.New("unsupported message") + errMessageMismatch = errors.New("message mismatch") + errMessageTooShort = errors.New("message too short") + errInvalidMessage = errors.New("invalid message") + errInvalidAddr = errors.New("invalid address") + errShortBuffer = errors.New("short buffer") +) + +// A RouteMessage represents a message conveying an address prefix, a +// nexthop address and an output interface. +// +// Unlike other messages, this message can be used to query adjacency +// information for the given address prefix, to add a new route, and +// to delete or modify the existing route from the routing information +// base inside the kernel by writing and reading route messages on a +// routing socket. +// +// For the manipulation of routing information, the route message must +// contain appropriate fields that include: +// +// Version = +// Type = +// Flags = +// Index = +// ID = +// Seq = +// Addrs = +// +// The Type field specifies a type of manipulation, the Flags field +// specifies a class of target information and the Addrs field +// specifies target information like the following: +// +// route.RouteMessage{ +// Version: RTM_VERSION, +// Type: RTM_GET, +// Flags: RTF_UP | RTF_HOST, +// ID: uintptr(os.Getpid()), +// Seq: 1, +// Addrs: []route.Addrs{ +// RTAX_DST: &route.Inet4Addr{ ... }, +// RTAX_IFP: &route.LinkAddr{ ... }, +// RTAX_BRD: &route.Inet4Addr{ ... }, +// }, +// } +// +// The values for the above fields depend on the implementation of +// each operating system. +// +// The Err field on a response message contains an error value on the +// requested operation. If non-nil, the requested operation is failed. +type RouteMessage struct { + Version int // message version + Type int // message type + Flags int // route flags + Index int // interface index when atatched + ID uintptr // sender's identifier; usually process ID + Seq int // sequence number + Err error // error on requested operation + Addrs []Addr // addresses + + extOff int // offset of header extension + raw []byte // raw message +} + +// Marshal returns the binary encoding of m. +func (m *RouteMessage) Marshal() ([]byte, error) { + return m.marshal() +} + +// A RIBType reprensents a type of routing information base. +type RIBType int + +const ( + RIBTypeRoute RIBType = syscall.NET_RT_DUMP + RIBTypeInterface RIBType = syscall.NET_RT_IFLIST +) + +// FetchRIB fetches a routing information base from the operating +// system. +// +// The provided af must be an address family. +// +// The provided arg must be a RIBType-specific argument. +// When RIBType is related to routes, arg might be a set of route +// flags. When RIBType is related to network interfaces, arg might be +// an interface index or a set of interface flags. In most cases, zero +// means a wildcard. 
+func FetchRIB(af int, typ RIBType, arg int) ([]byte, error) { + mib := [6]int32{sysCTL_NET, sysAF_ROUTE, 0, int32(af), int32(typ), int32(arg)} + n := uintptr(0) + if err := sysctl(mib[:], nil, &n, nil, 0); err != nil { + return nil, os.NewSyscallError("sysctl", err) + } + if n == 0 { + return nil, nil + } + b := make([]byte, n) + if err := sysctl(mib[:], &b[0], &n, nil, 0); err != nil { + return nil, os.NewSyscallError("sysctl", err) + } + return b[:n], nil +} diff --git a/vendor/golang.org/x/net/route/route_classic.go b/vendor/golang.org/x/net/route/route_classic.go new file mode 100644 index 000000000..02fa68830 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_classic.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd + +package route + +import ( + "runtime" + "syscall" +) + +func (m *RouteMessage) marshal() ([]byte, error) { + w, ok := wireFormats[m.Type] + if !ok { + return nil, errUnsupportedMessage + } + l := w.bodyOff + addrsSpace(m.Addrs) + if runtime.GOOS == "darwin" { + // Fix stray pointer writes on macOS. + // See golang.org/issue/22456. + l += 1024 + } + b := make([]byte, l) + nativeEndian.PutUint16(b[:2], uint16(l)) + if m.Version == 0 { + b[2] = sysRTM_VERSION + } else { + b[2] = byte(m.Version) + } + b[3] = byte(m.Type) + nativeEndian.PutUint32(b[8:12], uint32(m.Flags)) + nativeEndian.PutUint16(b[4:6], uint16(m.Index)) + nativeEndian.PutUint32(b[16:20], uint32(m.ID)) + nativeEndian.PutUint32(b[20:24], uint32(m.Seq)) + attrs, err := marshalAddrs(b[w.bodyOff:], m.Addrs) + if err != nil { + return nil, err + } + if attrs > 0 { + nativeEndian.PutUint32(b[12:16], uint32(attrs)) + } + return b, nil +} + +func (w *wireFormat) parseRouteMessage(typ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &RouteMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[4:6])), + ID: uintptr(nativeEndian.Uint32(b[16:20])), + Seq: int(nativeEndian.Uint32(b[20:24])), + extOff: w.extOff, + raw: b[:l], + } + errno := syscall.Errno(nativeEndian.Uint32(b[28:32])) + if errno != 0 { + m.Err = errno + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/route_openbsd.go b/vendor/golang.org/x/net/route/route_openbsd.go new file mode 100644 index 000000000..daf2e90c4 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_openbsd.go @@ -0,0 +1,65 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package route + +import "syscall" + +func (m *RouteMessage) marshal() ([]byte, error) { + l := sizeofRtMsghdr + addrsSpace(m.Addrs) + b := make([]byte, l) + nativeEndian.PutUint16(b[:2], uint16(l)) + if m.Version == 0 { + b[2] = sysRTM_VERSION + } else { + b[2] = byte(m.Version) + } + b[3] = byte(m.Type) + nativeEndian.PutUint16(b[4:6], uint16(sizeofRtMsghdr)) + nativeEndian.PutUint32(b[16:20], uint32(m.Flags)) + nativeEndian.PutUint16(b[6:8], uint16(m.Index)) + nativeEndian.PutUint32(b[24:28], uint32(m.ID)) + nativeEndian.PutUint32(b[28:32], uint32(m.Seq)) + attrs, err := marshalAddrs(b[sizeofRtMsghdr:], m.Addrs) + if err != nil { + return nil, err + } + if attrs > 0 { + nativeEndian.PutUint32(b[12:16], uint32(attrs)) + } + return b, nil +} + +func (*wireFormat) parseRouteMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < sizeofRtMsghdr { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &RouteMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[16:20])), + Index: int(nativeEndian.Uint16(b[6:8])), + ID: uintptr(nativeEndian.Uint32(b[24:28])), + Seq: int(nativeEndian.Uint32(b[28:32])), + raw: b[:l], + } + ll := int(nativeEndian.Uint16(b[4:6])) + if len(b) < ll { + return nil, errInvalidMessage + } + errno := syscall.Errno(nativeEndian.Uint32(b[32:36])) + if errno != 0 { + m.Err = errno + } + as, err := parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[ll:]) + if err != nil { + return nil, err + } + m.Addrs = as + return m, nil +} diff --git a/vendor/golang.org/x/net/route/route_test.go b/vendor/golang.org/x/net/route/route_test.go new file mode 100644 index 000000000..61bd17454 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_test.go @@ -0,0 +1,390 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
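The two marshal implementations above (classic BSD and OpenBSD) are the code path a caller hits when querying the kernel directly over a routing socket. The following is an illustrative sketch only, not part of the vendored patch: it assembles an RTM_GET lookup with the exported API from route.go and message.go, mirroring the round trip exercised by TestRouteMessage in message_test.go above. The destination 192.0.2.1 is a placeholder documentation address, and the RTM_*/RTAX_*/AF_* constants come from the platform syscall package, so this only builds on the BSDs this package supports.

package main

import (
	"log"
	"os"
	"syscall"

	"golang.org/x/net/route"
)

func main() {
	// A raw routing socket; the reply to our request comes back on the same fd.
	s, err := syscall.Socket(syscall.AF_ROUTE, syscall.SOCK_RAW, syscall.AF_UNSPEC)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.Close(s)

	m := &route.RouteMessage{
		Type: syscall.RTM_GET,
		ID:   uintptr(os.Getpid()),
		Seq:  1,
		Addrs: []route.Addr{
			syscall.RTAX_DST: &route.Inet4Addr{IP: [4]byte{192, 0, 2, 1}},
		},
	}
	// Version is left zero so marshal fills in sysRTM_VERSION, as shown above.
	wb, err := m.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	if _, err := syscall.Write(s, wb); err != nil {
		log.Fatal(err)
	}

	rb := make([]byte, os.Getpagesize())
	n, err := syscall.Read(s, rb)
	if err != nil {
		log.Fatal(err)
	}
	// ParseRIB(0, ...) decodes the reply the same way TestRouteMessage does.
	msgs, err := route.ParseRIB(0, rb[:n])
	if err != nil {
		log.Fatal(err)
	}
	for _, msg := range msgs {
		if rm, ok := msg.(*route.RouteMessage); ok && rm.Err == nil {
			log.Printf("route reply addrs: %+v", rm.Addrs)
		}
	}
}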
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "fmt" + "os/exec" + "runtime" + "time" +) + +func (m *RouteMessage) String() string { + return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[12:16]))) +} + +func (m *InterfaceMessage) String() string { + var attrs addrAttrs + if runtime.GOOS == "openbsd" { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) + } else { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) + } + return fmt.Sprintf("%s", attrs) +} + +func (m *InterfaceAddrMessage) String() string { + var attrs addrAttrs + if runtime.GOOS == "openbsd" { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) + } else { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) + } + return fmt.Sprintf("%s", attrs) +} + +func (m *InterfaceMulticastAddrMessage) String() string { + return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[4:8]))) +} + +func (m *InterfaceAnnounceMessage) String() string { + what := "" + switch m.What { + case 0: + what = "arrival" + case 1: + what = "departure" + } + return fmt.Sprintf("(%d %s %s)", m.Index, m.Name, what) +} + +func (m *InterfaceMetrics) String() string { + return fmt.Sprintf("(type=%d mtu=%d)", m.Type, m.MTU) +} + +func (m *RouteMetrics) String() string { + return fmt.Sprintf("(pmtu=%d)", m.PathMTU) +} + +type addrAttrs uint + +var addrAttrNames = [...]string{ + "dst", + "gateway", + "netmask", + "genmask", + "ifp", + "ifa", + "author", + "brd", + "df:mpls1-n:tag-o:src", // mpls1 for dragonfly, tag for netbsd, src for openbsd + "df:mpls2-o:srcmask", // mpls2 for dragonfly, srcmask for openbsd + "df:mpls3-o:label", // mpls3 for dragonfly, label for openbsd + "o:bfd", // bfd for openbsd + "o:dns", // dns for openbsd + "o:static", // static for openbsd + "o:search", // search for openbsd +} + +func (attrs addrAttrs) String() string { + var s string + for i, name := range addrAttrNames { + if attrs&(1<<uint(i)) != 0 { + if s != "" { + s += "|" + } + s += name + } + } + if s == "" { + return "<nil>" + } + return s +} + +type msgs []Message + +func (ms msgs) validate() ([]string, error) { + var ss []string + for _, m := range ms { + switch m := m.(type) { + case *RouteMessage: + if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[12:16]))); err != nil { + return nil, err + } + sys := m.Sys() + if sys == nil { + return nil, fmt.Errorf("no sys for %s", m.String()) + } + ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String()) + case *InterfaceMessage: + var attrs addrAttrs + if runtime.GOOS == "openbsd" { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) + } else { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) + } + if err := addrs(m.Addrs).match(attrs); err != nil { + return nil, err + } + sys := m.Sys() + if sys == nil { + return nil, fmt.Errorf("no sys for %s", m.String()) + } + ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String()) + case *InterfaceAddrMessage: + var attrs addrAttrs + if runtime.GOOS == "openbsd" { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16])) + } else { + attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8])) + } + if err := addrs(m.Addrs).match(attrs); err != nil { + return nil, err + } + ss = append(ss, m.String()+" "+addrs(m.Addrs).String()) + case *InterfaceMulticastAddrMessage: + if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[4:8]))); err != nil { + return nil, err + } + ss = append(ss, m.String()+" "+addrs(m.Addrs).String()) + case *InterfaceAnnounceMessage: + ss = append(ss, m.String()) + default: + ss = append(ss, fmt.Sprintf("%+v", m)) + } + } + return ss, nil +} + 
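The String and validate helpers above only format and cross-check parsed messages; the fetch-and-parse flow they validate is the one in fetchAndParseRIB further down. As a minimal usage sketch (illustrative only, not part of the vendored patch), dumping the interface list through the exported FetchRIB/ParseRIB API from route.go and message.go looks roughly like this on any of the supported BSDs:

package main

import (
	"fmt"
	"log"
	"syscall"

	"golang.org/x/net/route"
)

func main() {
	// AF_UNSPEC plus RIBTypeInterface asks the kernel for every interface.
	rib, err := route.FetchRIB(syscall.AF_UNSPEC, route.RIBTypeInterface, 0)
	if err != nil {
		log.Fatal(err)
	}
	msgs, err := route.ParseRIB(route.RIBTypeInterface, rib)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range msgs {
		// Interface entries without a link-level address are skipped by the
		// parsers above, so every InterfaceMessage here carries a Name.
		if ifm, ok := m.(*route.InterfaceMessage); ok {
			fmt.Printf("#%d %s flags=%#x\n", ifm.Index, ifm.Name, ifm.Flags)
		}
	}
}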
+type syss []Sys + +func (sys syss) String() string { + var s string + for _, sy := range sys { + switch sy := sy.(type) { + case *InterfaceMetrics: + if len(s) > 0 { + s += " " + } + s += sy.String() + case *RouteMetrics: + if len(s) > 0 { + s += " " + } + s += sy.String() + } + } + return s +} + +type addrFamily int + +func (af addrFamily) String() string { + switch af { + case sysAF_UNSPEC: + return "unspec" + case sysAF_LINK: + return "link" + case sysAF_INET: + return "inet4" + case sysAF_INET6: + return "inet6" + default: + return fmt.Sprintf("%d", af) + } +} + +const hexDigit = "0123456789abcdef" + +type llAddr []byte + +func (a llAddr) String() string { + if len(a) == 0 { + return "" + } + buf := make([]byte, 0, len(a)*3-1) + for i, b := range a { + if i > 0 { + buf = append(buf, ':') + } + buf = append(buf, hexDigit[b>>4]) + buf = append(buf, hexDigit[b&0xF]) + } + return string(buf) +} + +type ipAddr []byte + +func (a ipAddr) String() string { + if len(a) == 0 { + return "" + } + if len(a) == 4 { + return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3]) + } + if len(a) == 16 { + return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]) + } + s := make([]byte, len(a)*2) + for i, tn := range a { + s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf] + } + return string(s) +} + +func (a *LinkAddr) String() string { + name := a.Name + if name == "" { + name = "" + } + lla := llAddr(a.Addr).String() + if lla == "" { + lla = "" + } + return fmt.Sprintf("(%v %d %s %s)", addrFamily(a.Family()), a.Index, name, lla) +} + +func (a *Inet4Addr) String() string { + return fmt.Sprintf("(%v %v)", addrFamily(a.Family()), ipAddr(a.IP[:])) +} + +func (a *Inet6Addr) String() string { + return fmt.Sprintf("(%v %v %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.ZoneID) +} + +func (a *DefaultAddr) String() string { + return fmt.Sprintf("(%v %s)", addrFamily(a.Family()), ipAddr(a.Raw[2:]).String()) +} + +type addrs []Addr + +func (as addrs) String() string { + var s string + for _, a := range as { + if a == nil { + continue + } + if len(s) > 0 { + s += " " + } + switch a := a.(type) { + case *LinkAddr: + s += a.String() + case *Inet4Addr: + s += a.String() + case *Inet6Addr: + s += a.String() + case *DefaultAddr: + s += a.String() + } + } + if s == "" { + return "" + } + return s +} + +func (as addrs) match(attrs addrAttrs) error { + var ts addrAttrs + af := sysAF_UNSPEC + for i := range as { + if as[i] != nil { + ts |= 1 << uint(i) + } + switch as[i].(type) { + case *Inet4Addr: + if af == sysAF_UNSPEC { + af = sysAF_INET + } + if af != sysAF_INET { + return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af)) + } + case *Inet6Addr: + if af == sysAF_UNSPEC { + af = sysAF_INET6 + } + if af != sysAF_INET6 { + return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af)) + } + } + } + if ts != attrs && ts > attrs { + return fmt.Errorf("%v not included in %v", ts, attrs) + } + return nil +} + +func fetchAndParseRIB(af int, typ RIBType) ([]Message, error) { + var err error + var b []byte + for i := 0; i < 3; i++ { + if b, err = FetchRIB(af, typ, 0); err != nil { + time.Sleep(10 * time.Millisecond) + continue + } + break + } + if err != nil { + return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err) + } + ms, err := ParseRIB(typ, b) + if err != nil { + return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err) + } + return ms, nil +} + +// propVirtual is a 
proprietary virtual network interface. +type propVirtual struct { + name string + addr, mask string + setupCmds []*exec.Cmd + teardownCmds []*exec.Cmd +} + +func (pv *propVirtual) setup() error { + for _, cmd := range pv.setupCmds { + if err := cmd.Run(); err != nil { + pv.teardown() + return err + } + } + return nil +} + +func (pv *propVirtual) teardown() error { + for _, cmd := range pv.teardownCmds { + if err := cmd.Run(); err != nil { + return err + } + } + return nil +} + +func (pv *propVirtual) configure(suffix int) error { + if runtime.GOOS == "openbsd" { + pv.name = fmt.Sprintf("vether%d", suffix) + } else { + pv.name = fmt.Sprintf("vlan%d", suffix) + } + xname, err := exec.LookPath("ifconfig") + if err != nil { + return err + } + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "create"}, + }) + if runtime.GOOS == "netbsd" { + // NetBSD requires an underlying dot1Q-capable network + // interface. + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "vlan", fmt.Sprintf("%d", suffix&0xfff), "vlanif", "wm0"}, + }) + } + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "inet", pv.addr, "netmask", pv.mask}, + }) + pv.teardownCmds = append(pv.teardownCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "destroy"}, + }) + return nil +} diff --git a/vendor/golang.org/x/net/route/sys.go b/vendor/golang.org/x/net/route/sys.go new file mode 100644 index 000000000..3d0ee9b14 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys.go @@ -0,0 +1,39 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import "unsafe" + +var ( + nativeEndian binaryByteOrder + kernelAlign int + wireFormats map[int]*wireFormat +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = littleEndian + } else { + nativeEndian = bigEndian + } + kernelAlign, wireFormats = probeRoutingStack() +} + +func roundup(l int) int { + if l == 0 { + return kernelAlign + } + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} + +type wireFormat struct { + extOff int // offset of header extension + bodyOff int // offset of message body + parse func(RIBType, []byte) (Message, error) +} diff --git a/vendor/golang.org/x/net/route/sys_darwin.go b/vendor/golang.org/x/net/route/sys_darwin.go new file mode 100644 index 000000000..d2daf5c05 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_darwin.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (typ RIBType) parseable() bool { + switch typ { + case sysNET_RT_STAT, sysNET_RT_TRASH: + return false + default: + return true + } +} + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), + }, + } +} + +// InterfaceMetrics represents interface metrics. 
+type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + rtm := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdrDarwin15} + rtm.parse = rtm.parseRouteMessage + rtm2 := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdr2Darwin15} + rtm2.parse = rtm2.parseRouteMessage + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDarwin15} + ifm.parse = ifm.parseInterfaceMessage + ifm2 := &wireFormat{extOff: 32, bodyOff: sizeofIfMsghdr2Darwin15} + ifm2.parse = ifm2.parseInterfaceMessage + ifam := &wireFormat{extOff: sizeofIfaMsghdrDarwin15, bodyOff: sizeofIfaMsghdrDarwin15} + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDarwin15, bodyOff: sizeofIfmaMsghdrDarwin15} + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifmam2 := &wireFormat{extOff: sizeofIfmaMsghdr2Darwin15, bodyOff: sizeofIfmaMsghdr2Darwin15} + ifmam2.parse = ifmam2.parseInterfaceMulticastAddrMessage + // Darwin kernels require 32-bit aligned access to routing facilities. + return 4, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFINFO2: ifm2, + sysRTM_NEWMADDR2: ifmam2, + sysRTM_GET2: rtm2, + } +} diff --git a/vendor/golang.org/x/net/route/sys_dragonfly.go b/vendor/golang.org/x/net/route/sys_dragonfly.go new file mode 100644 index 000000000..0c14bc2b4 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_dragonfly.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "unsafe" + +func (typ RIBType) parseable() bool { return true } + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } +} + +// InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. 
+func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrDragonFlyBSD4} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDragonFlyBSD4} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: sizeofIfaMsghdrDragonFlyBSD4, bodyOff: sizeofIfaMsghdrDragonFlyBSD4} + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDragonFlyBSD4, bodyOff: sizeofIfmaMsghdrDragonFlyBSD4} + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrDragonFlyBSD4, bodyOff: sizeofIfAnnouncemsghdrDragonFlyBSD4} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return int(unsafe.Sizeof(p)), map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFANNOUNCE: ifanm, + } +} diff --git a/vendor/golang.org/x/net/route/sys_freebsd.go b/vendor/golang.org/x/net/route/sys_freebsd.go new file mode 100644 index 000000000..89ba1c4e2 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_freebsd.go @@ -0,0 +1,155 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import ( + "syscall" + "unsafe" +) + +func (typ RIBType) parseable() bool { return true } + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + if kernelAlign == 8 { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } + } + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), + }, + } +} + +// InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + wordSize := int(unsafe.Sizeof(p)) + align := int(unsafe.Sizeof(p)) + // In the case of kern.supported_archs="amd64 i386", we need + // to know the underlying kernel's architecture because the + // alignment for routing facilities are set at the build time + // of the kernel. 
+ conf, _ := syscall.Sysctl("kern.conftxt") + for i, j := 0, 0; j < len(conf); j++ { + if conf[j] != '\n' { + continue + } + s := conf[i:j] + i = j + 1 + if len(s) > len("machine") && s[:len("machine")] == "machine" { + s = s[len("machine"):] + for k := 0; k < len(s); k++ { + if s[k] == ' ' || s[k] == '\t' { + s = s[1:] + } + break + } + if s == "amd64" { + align = 8 + } + break + } + } + var rtm, ifm, ifam, ifmam, ifanm *wireFormat + if align != wordSize { // 386 emulation on amd64 + rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10Emu - sizeofRtMetricsFreeBSD10Emu, bodyOff: sizeofRtMsghdrFreeBSD10Emu} + ifm = &wireFormat{extOff: 16} + ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10Emu, bodyOff: sizeofIfaMsghdrFreeBSD10Emu} + ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10Emu, bodyOff: sizeofIfmaMsghdrFreeBSD10Emu} + ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10Emu, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10Emu} + } else { + rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10 - sizeofRtMetricsFreeBSD10, bodyOff: sizeofRtMsghdrFreeBSD10} + ifm = &wireFormat{extOff: 16} + ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10, bodyOff: sizeofIfaMsghdrFreeBSD10} + ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10, bodyOff: sizeofIfmaMsghdrFreeBSD10} + ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10} + } + rel, _ := syscall.SysctlUint32("kern.osreldate") + switch { + case rel < 800000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD7Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD7 + } + case 800000 <= rel && rel < 900000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD8Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD8 + } + case 900000 <= rel && rel < 1000000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD9Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD9 + } + case 1000000 <= rel && rel < 1100000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD10Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD10 + } + default: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD11Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD11 + } + } + rtm.parse = rtm.parseRouteMessage + ifm.parse = ifm.parseInterfaceMessage + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return align, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFANNOUNCE: ifanm, + } +} diff --git a/vendor/golang.org/x/net/route/sys_netbsd.go b/vendor/golang.org/x/net/route/sys_netbsd.go new file mode 100644 index 000000000..02f71d54b --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_netbsd.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (typ RIBType) parseable() bool { return true } + +// RouteMetrics represents route metrics. 
+type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } +} + +// RouteMetrics represents route metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrNetBSD7} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrNetBSD7} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: sizeofIfaMsghdrNetBSD7, bodyOff: sizeofIfaMsghdrNetBSD7} + ifam.parse = ifam.parseInterfaceAddrMessage + ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrNetBSD7, bodyOff: sizeofIfAnnouncemsghdrNetBSD7} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + // NetBSD 6 and above kernels require 64-bit aligned access to + // routing facilities. + return 8, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFANNOUNCE: ifanm, + sysRTM_IFINFO: ifm, + } +} diff --git a/vendor/golang.org/x/net/route/sys_openbsd.go b/vendor/golang.org/x/net/route/sys_openbsd.go new file mode 100644 index 000000000..c5674e83d --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_openbsd.go @@ -0,0 +1,80 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "unsafe" + +func (typ RIBType) parseable() bool { + switch typ { + case sysNET_RT_STATS, sysNET_RT_TABLE: + return false + default: + return true + } +} + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[60:64])), + }, + } +} + +// InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. 
+func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[24]), + MTU: int(nativeEndian.Uint32(m.raw[28:32])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + rtm := &wireFormat{extOff: -1, bodyOff: -1} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: -1, bodyOff: -1} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: -1, bodyOff: -1} + ifam.parse = ifam.parseInterfaceAddrMessage + ifanm := &wireFormat{extOff: -1, bodyOff: -1} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return int(unsafe.Sizeof(p)), map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_IFANNOUNCE: ifanm, + sysRTM_DESYNC: rtm, + } +} diff --git a/vendor/golang.org/x/net/route/syscall.go b/vendor/golang.org/x/net/route/syscall.go new file mode 100644 index 000000000..5f69ea63d --- /dev/null +++ b/vendor/golang.org/x/net/route/syscall.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "syscall" + "unsafe" +) + +var zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var p unsafe.Pointer + if len(mib) > 0 { + p = unsafe.Pointer(&mib[0]) + } else { + p = unsafe.Pointer(&zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), newlen) + if errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/route/zsys_darwin.go b/vendor/golang.org/x/net/route/zsys_darwin.go new file mode 100644 index 000000000..4e2e1ab09 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_darwin.go @@ -0,0 +1,99 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_STAT = 0x4 + sysNET_RT_TRASH = 0x5 + sysNET_RT_IFLIST2 = 0x6 + sysNET_RT_DUMP2 = 0x7 + sysNET_RT_MAXID = 0xa +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_MAXID = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFINFO2 = 0x12 + sysRTM_NEWMADDR2 = 0x13 + sysRTM_GET2 = 0x14 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 
0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrDarwin15 = 0x70 + sizeofIfaMsghdrDarwin15 = 0x14 + sizeofIfmaMsghdrDarwin15 = 0x10 + sizeofIfMsghdr2Darwin15 = 0xa0 + sizeofIfmaMsghdr2Darwin15 = 0x14 + sizeofIfDataDarwin15 = 0x60 + sizeofIfData64Darwin15 = 0x80 + + sizeofRtMsghdrDarwin15 = 0x5c + sizeofRtMsghdr2Darwin15 = 0x5c + sizeofRtMetricsDarwin15 = 0x38 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_dragonfly.go b/vendor/golang.org/x/net/route/zsys_dragonfly.go new file mode 100644 index 000000000..719c88d11 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_dragonfly.go @@ -0,0 +1,98 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_MAXID = 0x4 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 + sysCTL_LWKT = 0xa + sysCTL_MAXID = 0xb +) + +const ( + sysRTM_VERSION = 0x6 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_MPLS1 = 0x100 + sysRTA_MPLS2 = 0x200 + sysRTA_MPLS3 = 0x400 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MPLS1 = 0x8 + sysRTAX_MPLS2 = 0x9 + sysRTAX_MPLS3 = 0xa + sysRTAX_MAX = 0xb +) + +const ( + sizeofIfMsghdrDragonFlyBSD4 = 0xb0 + sizeofIfaMsghdrDragonFlyBSD4 = 0x14 + sizeofIfmaMsghdrDragonFlyBSD4 = 0x10 + sizeofIfAnnouncemsghdrDragonFlyBSD4 = 0x18 + + sizeofRtMsghdrDragonFlyBSD4 = 0x98 + sizeofRtMetricsDragonFlyBSD4 = 0x70 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_386.go b/vendor/golang.org/x/net/route/zsys_freebsd_386.go new file mode 100644 index 000000000..b03bc01f6 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_386.go @@ -0,0 +1,126 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + 
sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0x68 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0x6c + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x5c + sizeofRtMetricsFreeBSD10 = 0x38 + + sizeofIfMsghdrFreeBSD7 = 0x60 + sizeofIfMsghdrFreeBSD8 = 0x60 + sizeofIfMsghdrFreeBSD9 = 0x60 + sizeofIfMsghdrFreeBSD10 = 0x64 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x50 + sizeofIfDataFreeBSD8 = 0x50 + sizeofIfDataFreeBSD9 = 0x50 + sizeofIfDataFreeBSD10 = 0x54 + sizeofIfDataFreeBSD11 = 0x98 + + // MODIFIED BY HAND FOR 386 EMULATION ON AMD64 + // 386 EMULATION USES THE UNDERLYING RAW DATA LAYOUT + + sizeofIfMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x98 + sizeofRtMetricsFreeBSD10Emu = 0x70 + + sizeofIfMsghdrFreeBSD7Emu = 0xa8 + sizeofIfMsghdrFreeBSD8Emu = 0xa8 + sizeofIfMsghdrFreeBSD9Emu = 0xa8 + sizeofIfMsghdrFreeBSD10Emu = 0xa8 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x98 + sizeofIfDataFreeBSD8Emu = 0x98 + sizeofIfDataFreeBSD9Emu = 0x98 + sizeofIfDataFreeBSD10Emu = 0x98 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go new file mode 100644 index 000000000..0b675b3d3 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go @@ -0,0 +1,123 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 
0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0xb0 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0xb0 + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x98 + sizeofRtMetricsFreeBSD10 = 0x70 + + sizeofIfMsghdrFreeBSD7 = 0xa8 + sizeofIfMsghdrFreeBSD8 = 0xa8 + sizeofIfMsghdrFreeBSD9 = 0xa8 + sizeofIfMsghdrFreeBSD10 = 0xa8 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x98 + sizeofIfDataFreeBSD8 = 0x98 + sizeofIfDataFreeBSD9 = 0x98 + sizeofIfDataFreeBSD10 = 0x98 + sizeofIfDataFreeBSD11 = 0x98 + + sizeofIfMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x98 + sizeofRtMetricsFreeBSD10Emu = 0x70 + + sizeofIfMsghdrFreeBSD7Emu = 0xa8 + sizeofIfMsghdrFreeBSD8Emu = 0xa8 + sizeofIfMsghdrFreeBSD9Emu = 0xa8 + sizeofIfMsghdrFreeBSD10Emu = 0xa8 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x98 + sizeofIfDataFreeBSD8Emu = 0x98 + sizeofIfDataFreeBSD9Emu = 0x98 + sizeofIfDataFreeBSD10Emu = 0x98 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_arm.go b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go new file mode 100644 index 000000000..58f8ea16f --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go @@ -0,0 +1,123 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0x68 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0x6c + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x5c + sizeofRtMetricsFreeBSD10 = 0x38 + + sizeofIfMsghdrFreeBSD7 = 0x70 + sizeofIfMsghdrFreeBSD8 = 0x70 + sizeofIfMsghdrFreeBSD9 = 0x70 + sizeofIfMsghdrFreeBSD10 = 0x70 + sizeofIfMsghdrFreeBSD11 = 
0xa8 + + sizeofIfDataFreeBSD7 = 0x60 + sizeofIfDataFreeBSD8 = 0x60 + sizeofIfDataFreeBSD9 = 0x60 + sizeofIfDataFreeBSD10 = 0x60 + sizeofIfDataFreeBSD11 = 0x98 + + sizeofIfMsghdrlFreeBSD10Emu = 0x68 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0x6c + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x5c + sizeofRtMetricsFreeBSD10Emu = 0x38 + + sizeofIfMsghdrFreeBSD7Emu = 0x70 + sizeofIfMsghdrFreeBSD8Emu = 0x70 + sizeofIfMsghdrFreeBSD9Emu = 0x70 + sizeofIfMsghdrFreeBSD10Emu = 0x70 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x60 + sizeofIfDataFreeBSD8Emu = 0x60 + sizeofIfDataFreeBSD9Emu = 0x60 + sizeofIfDataFreeBSD10Emu = 0x60 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_netbsd.go b/vendor/golang.org/x/net/route/zsys_netbsd.go new file mode 100644 index 000000000..e0df45e8b --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_netbsd.go @@ -0,0 +1,97 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x22 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x5 + sysNET_RT_MAXID = 0x6 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_DDB = 0x9 + sysCTL_PROC = 0xa + sysCTL_VENDOR = 0xb + sysCTL_EMUL = 0xc + sysCTL_SECURITY = 0xd + sysCTL_MAXID = 0xe +) + +const ( + sysRTM_VERSION = 0x4 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFANNOUNCE = 0x10 + sysRTM_IEEE80211 = 0x11 + sysRTM_SETGATE = 0x12 + sysRTM_LLINFO_UPD = 0x13 + sysRTM_IFINFO = 0x14 + sysRTM_CHGADDR = 0x15 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_TAG = 0x100 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_TAG = 0x8 + sysRTAX_MAX = 0x9 +) + +const ( + sizeofIfMsghdrNetBSD7 = 0x98 + sizeofIfaMsghdrNetBSD7 = 0x18 + sizeofIfAnnouncemsghdrNetBSD7 = 0x18 + + sizeofRtMsghdrNetBSD7 = 0x78 + sizeofRtMetricsNetBSD7 = 0x50 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_openbsd.go b/vendor/golang.org/x/net/route/zsys_openbsd.go new file mode 100644 index 000000000..db8c8efb4 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_openbsd.go @@ -0,0 +1,101 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_STATS = 0x4 + sysNET_RT_TABLE = 0x5 + sysNET_RT_IFNAMES = 0x6 + 
sysNET_RT_MAXID = 0x7 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_FS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_DDB = 0x9 + sysCTL_VFS = 0xa + sysCTL_MAXID = 0xb +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_IFANNOUNCE = 0xf + sysRTM_DESYNC = 0x10 + sysRTM_INVALIDATE = 0x11 + sysRTM_BFD = 0x12 + sysRTM_PROPOSAL = 0x13 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_SRC = 0x100 + sysRTA_SRCMASK = 0x200 + sysRTA_LABEL = 0x400 + sysRTA_BFD = 0x800 + sysRTA_DNS = 0x1000 + sysRTA_STATIC = 0x2000 + sysRTA_SEARCH = 0x4000 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_SRC = 0x8 + sysRTAX_SRCMASK = 0x9 + sysRTAX_LABEL = 0xa + sysRTAX_BFD = 0xb + sysRTAX_DNS = 0xc + sysRTAX_STATIC = 0xd + sysRTAX_SEARCH = 0xe + sysRTAX_MAX = 0xf +) + +const ( + sizeofRtMsghdr = 0x60 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 000000000..c646a6952 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
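The EventLog interface above (Printf, Errorf, Finish) and NewEventLog are the exported entry points for long-lived objects. A minimal usage sketch against that API; the family/title strings, the doWork helper, and the listen address are illustrative placeholders only:

    package main

    import (
        "errors"
        "log"
        "net/http"

        "golang.org/x/net/trace"
    )

    // doWork is a made-up stand-in for whatever the component actually does.
    func doWork() error { return errors.New("backend unavailable") }

    func main() {
        // One EventLog per long-lived object; the family groups related logs on /debug/events.
        el := trace.NewEventLog("mypkg.Worker", "queue-1")
        defer el.Finish()

        el.Printf("dequeued %d items", 42)
        if err := doWork(); err != nil {
            // Errorf marks the entry as an error, which feeds the errs<10s/errs<1m/... buckets.
            el.Errorf("doWork failed: %v", err)
        }

        // The trace package registers its handlers on http.DefaultServeMux,
        // so this log is served at http://localhost:8080/debug/events.
        log.Fatal(http.ListenAndServe("localhost:8080", nil))
    }

Only the most recent maxEventsPerLog (100) entries are kept per log; older entries are collapsed into a single "(N events discarded)" marker by the printf method below.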
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

    /debug/events

    + +
  • + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
    {{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
    + +{{if $.EventLogs}} +


    +

    Family: {{$.Family}}

    + +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
    WhenElapsed
    {{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
    {{$el.Stack|trimSpace}}
    {{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
    +{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 000000000..9bf4286c7 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
    Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
    +
    + +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
    [{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
    +`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/histogram_test.go b/vendor/golang.org/x/net/trace/histogram_test.go new file mode 100644 index 000000000..d384b9332 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram_test.go @@ -0,0 +1,325 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "math" + "testing" +) + +type sumTest struct { + value int64 + sum int64 + sumOfSquares float64 + total int64 +} + +var sumTests = []sumTest{ + {100, 100, 10000, 1}, + {50, 150, 12500, 2}, + {50, 200, 15000, 3}, + {50, 250, 17500, 4}, +} + +type bucketingTest struct { + in int64 + log int + bucket int +} + +var bucketingTests = []bucketingTest{ + {0, 0, 0}, + {1, 1, 0}, + {2, 2, 1}, + {3, 2, 1}, + {4, 3, 2}, + {1000, 10, 9}, + {1023, 10, 9}, + {1024, 11, 10}, + {1000000, 20, 19}, +} + +type multiplyTest struct { + in int64 + ratio float64 + expectedSum int64 + expectedTotal int64 + expectedSumOfSquares float64 +} + +var multiplyTests = []multiplyTest{ + {15, 2.5, 37, 2, 562.5}, + {128, 4.6, 758, 13, 77953.9}, +} + +type percentileTest struct { + fraction float64 + expected int64 +} + +var percentileTests = []percentileTest{ + {0.25, 48}, + {0.5, 96}, + {0.6, 109}, + {0.75, 128}, + {0.90, 205}, + {0.95, 230}, + {0.99, 256}, +} + +func TestSum(t *testing.T) { + var h histogram + + for _, test := range sumTests { + h.addMeasurement(test.value) + sum := h.sum + if sum != test.sum { + t.Errorf("h.Sum = %v WANT: %v", sum, test.sum) + } + + sumOfSquares := h.sumOfSquares + if sumOfSquares != test.sumOfSquares { + t.Errorf("h.SumOfSquares = %v WANT: %v", sumOfSquares, test.sumOfSquares) + } + + total := h.total() + if total != test.total { + t.Errorf("h.Total = %v WANT: %v", total, test.total) + } + } +} + +func TestMultiply(t *testing.T) { + var h histogram + for i, test := range multiplyTests { + h.addMeasurement(test.in) + h.Multiply(test.ratio) + if h.sum != test.expectedSum { + t.Errorf("#%v: h.sum = %v WANT: %v", i, h.sum, test.expectedSum) + } + if h.total() != test.expectedTotal { + t.Errorf("#%v: h.total = %v WANT: %v", i, h.total(), test.expectedTotal) + } + if h.sumOfSquares != test.expectedSumOfSquares { + t.Errorf("#%v: h.SumOfSquares = %v WANT: %v", i, test.expectedSumOfSquares, h.sumOfSquares) + } + } +} + +func TestBucketingFunctions(t *testing.T) { + for _, test := range bucketingTests { + log := log2(test.in) + if log != test.log { + t.Errorf("log2 = %v WANT: %v", log, test.log) + } + + bucket := getBucket(test.in) + if bucket != test.bucket { + t.Errorf("getBucket = %v WANT: %v", bucket, test.bucket) + } + } +} + +func TestAverage(t *testing.T) { + a := new(histogram) + average := a.average() + if average != 0 { + t.Errorf("Average of empty histogram was %v WANT: 0", average) + } + + a.addMeasurement(1) + a.addMeasurement(1) + a.addMeasurement(3) + const expected = float64(5) / float64(3) + average = a.average() + + if !isApproximate(average, expected) { + t.Errorf("Average = %g WANT: %v", average, expected) + } +} + +func TestStandardDeviation(t *testing.T) { + a := new(histogram) + add(a, 10, 1<<4) + add(a, 10, 1<<5) + add(a, 10, 1<<6) + stdDev := a.standardDeviation() + const expected = 19.95 + + if !isApproximate(stdDev, expected) { + t.Errorf("StandardDeviation = %v WANT: %v", stdDev, expected) + } + + // No values + a = new(histogram) + stdDev = a.standardDeviation() + + if !isApproximate(stdDev, 0) { + 
t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } + + add(a, 1, 1<<4) + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } + + add(a, 10, 1<<4) + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } +} + +func TestPercentileBoundary(t *testing.T) { + a := new(histogram) + add(a, 5, 1<<4) + add(a, 10, 1<<6) + add(a, 5, 1<<7) + + for _, test := range percentileTests { + percentile := a.percentileBoundary(test.fraction) + if percentile != test.expected { + t.Errorf("h.PercentileBoundary (fraction=%v) = %v WANT: %v", test.fraction, percentile, test.expected) + } + } +} + +func TestCopyFrom(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1} + + a.CopyFrom(&b) + + if a.String() != b.String() { + t.Errorf("a.String = %s WANT: %s", a.String(), b.String()) + } +} + +func TestClear(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + + a.Clear() + + expected := "0, 0.000000, 0, 0, []" + if a.String() != expected { + t.Errorf("a.String = %s WANT %s", a.String(), expected) + } +} + +func TestNew(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := "0, 0.000000, 0, 0, []" + if b.(*histogram).String() != expected { + t.Errorf("b.(*histogram).String = %s WANT: %s", b.(*histogram).String(), expected) + } +} + +func TestAdd(t *testing.T) { + // The tests here depend on the associativity of addMeasurement and Add. 
+ // Add empty observation + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := a.String() + a.Add(b) + if a.String() != expected { + t.Errorf("a.String = %s WANT: %s", a.String(), expected) + } + + // Add same bucketed value, no new buckets + c := new(histogram) + d := new(histogram) + e := new(histogram) + c.addMeasurement(12) + d.addMeasurement(11) + e.addMeasurement(12) + e.addMeasurement(11) + c.Add(d) + if c.String() != e.String() { + t.Errorf("c.String = %s WANT: %s", c.String(), e.String()) + } + + // Add bucketed values + f := new(histogram) + g := new(histogram) + h := new(histogram) + f.addMeasurement(4) + f.addMeasurement(12) + f.addMeasurement(100) + g.addMeasurement(18) + g.addMeasurement(36) + g.addMeasurement(255) + h.addMeasurement(4) + h.addMeasurement(12) + h.addMeasurement(100) + h.addMeasurement(18) + h.addMeasurement(36) + h.addMeasurement(255) + f.Add(g) + if f.String() != h.String() { + t.Errorf("f.String = %q WANT: %q", f.String(), h.String()) + } + + // add buckets to no buckets + i := new(histogram) + j := new(histogram) + k := new(histogram) + j.addMeasurement(18) + j.addMeasurement(36) + j.addMeasurement(255) + k.addMeasurement(18) + k.addMeasurement(36) + k.addMeasurement(255) + i.Add(j) + if i.String() != k.String() { + t.Errorf("i.String = %q WANT: %q", i.String(), k.String()) + } + + // add buckets to single value (no overlap) + l := new(histogram) + m := new(histogram) + n := new(histogram) + l.addMeasurement(0) + m.addMeasurement(18) + m.addMeasurement(36) + m.addMeasurement(255) + n.addMeasurement(0) + n.addMeasurement(18) + n.addMeasurement(36) + n.addMeasurement(255) + l.Add(m) + if l.String() != n.String() { + t.Errorf("l.String = %q WANT: %q", l.String(), n.String()) + } + + // mixed order + o := new(histogram) + p := new(histogram) + o.addMeasurement(0) + o.addMeasurement(2) + o.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(2) + if o.String() != p.String() { + t.Errorf("o.String = %q WANT: %q", o.String(), p.String()) + } +} + +func add(h *histogram, times int, val int64) { + for i := 0; i < times; i++ { + h.addMeasurement(val) + } +} + +func isApproximate(x, y float64) bool { + return math.Abs(x-y) < 1e-2 +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 000000000..a46ee0eaa --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1103 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. 
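A Trace is often handed down a call chain via the NewContext and FromContext helpers defined in this package. A sketch of that pattern (Go 1.7+ shown, since it uses req.Context(); fooHandler and fetchBackend are illustrative names, and the snippet assumes the standard "context" and "net/http" imports):

    func fooHandler(w http.ResponseWriter, req *http.Request) {
        tr := trace.New("mypkg.Foo", req.URL.Path)
        defer tr.Finish()

        // Attach the trace to the request context so callees can log to it.
        ctx := trace.NewContext(req.Context(), tr)
        fetchBackend(ctx)
    }

    func fetchBackend(ctx context.Context) {
        if tr, ok := trace.FromContext(ctx); ok {
            // LazyPrintf defers formatting until /debug/requests is rendered.
            tr.LazyPrintf("calling backend")
        }
        // ...
    }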
+ +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc("/debug/requests", Traces) + http.HandleFunc("/debug/events", Events) +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. 
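AuthRequest above is a plain package variable, so a program can replace it to widen (or tighten) access to the two debug pages. A hedged sketch that trusts an internal network in addition to localhost; the 10.0.0.0/8 CIDR and the listen address are assumptions made purely for illustration:

    package main

    import (
        "log"
        "net"
        "net/http"

        "golang.org/x/net/trace"
    )

    func init() {
        _, internal, _ := net.ParseCIDR("10.0.0.0/8")
        trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
            host, _, err := net.SplitHostPort(req.RemoteAddr)
            if err != nil {
                host = req.RemoteAddr
            }
            ip := net.ParseIP(host)
            switch {
            case ip == nil:
                return false, false
            case ip.IsLoopback():
                return true, true // local operators may see sensitive events
            case internal.Contains(ip):
                return true, false // internal callers see redacted events
            default:
                return false, false
            }
        }
    }

    func main() {
        // /debug/requests and /debug/events are registered on http.DefaultServeMux by this package's init.
        log.Fatal(http.ListenAndServe(":8080", nil))
    }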
+// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. 
+type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. 
+ activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. 
+ Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. 
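traceBucket above is a fixed-size FIFO: start marks the oldest slot, length counts the valid slots, and Add overwrites (and unrefs) the oldest trace once the buffer is full. A minimal standalone sketch of the same append-with-eviction pattern, with strings standing in for traces:

    package main

    import "fmt"

    // ring keeps only the most recent len(buf) values, like traceBucket does for traces.
    type ring struct {
        buf    [10]string
        start  int // index of the oldest element, < len(buf)
        length int // number of valid elements, <= len(buf)
    }

    func (r *ring) Add(s string) {
        i := r.start + r.length
        if i >= len(r.buf) {
            i -= len(r.buf)
        }
        if r.length == len(r.buf) {
            // Full: slot i holds the oldest element; overwrite it and advance start
            // (this is the point where traceBucket.Add calls unref on the evicted trace).
            r.start++
            if r.start == len(r.buf) {
                r.start = 0
            }
        } else {
            r.length++
        }
        r.buf[i] = s
    }

    func main() {
        var r ring
        for i := 0; i < 15; i++ {
            r.Add(fmt.Sprintf("trace-%d", i))
        }
        // Only the 10 most recent survive: trace-5 ... trace-14.
        fmt.Println(r.buf, "start =", r.start, "length =", r.length)
    }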
+type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. + spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. 
+ */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. 
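    // Illustrative examples of the blanking described above:
    //   elapsed(123*time.Microsecond)  -> " .   123"   (leading "0" and the zero run are blanked)
    //   elapsed(1500*time.Millisecond) -> "1.500000"   (>= 1s, left unchanged)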
+ if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+    {{range $fam := .Families}}
+    <tr>
+        <td class="family">{{$fam}}</td>
+
+        {{$n := index $.ActiveTraceCount $fam}}
+        <td class="active {{if not $n}}empty{{end}}">
+            {{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+            [{{$n}} active]
+            {{if $n}}</a>{{end}}
+        </td>
+
+        {{$f := index $.CompletedTraces $fam}}
+        {{range $i, $b := $f.Buckets}}
+        {{$empty := $b.Empty}}
+        <td {{if $empty}}class="empty"{{end}}>
+            {{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+            [{{.Cond}}]
+            {{if not $empty}}</a>{{end}}
+        </td>
+        {{end}}
+
+        {{$nb := len $f.Buckets}}
+        <td class="latency-first">
+            <a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
+        </td>
+        <td>
+            <a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
+        </td>
+        <td>
+            <a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
+        </td>
+    </tr>
+    {{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+    <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+    [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+    <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+    [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+    {{if or $.Expanded (not $.Traced)}}
+    <a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+    {{else}}
+    [Traced/Summary]
+    {{end}}
+    {{if or (not $.Expanded) (not $.Traced)}}
+    <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+    {{else}}
+    [Traced/Expanded]
+    {{end}}
+{{end}}
+
+{{if $.Total}}
+<p>Showing {{len $.Traces}} of {{$.Total}} traces.</p>
+{{end}}
+
+<table id="reqs">
+    <caption>
+        {{if $.Active}}Active{{else}}Completed{{end}} Requests
+    </caption>
+    <tr><th>When</th><th>Elapsed (s)</th></tr>
+    {{range $tr := $.Traces}}
+    <tr class="first">
+        <td class="when">{{$tr.When}}</td>
+        <td class="elapsed">{{$tr.ElapsedTime}}</td>
+        <td>{{$tr.Title}}</td>
+    </tr>
+    {{/* TODO: include traceID/spanID */}}
+    {{if $.Expanded}}
+    {{range $tr.Events}}
+    <tr>
+        <td class="when">{{.WhenString}}</td>
+        <td class="elapsed">{{elapsed .Elapsed}}</td>
+        <td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}</td>
+    </tr>
+    {{end}}
+    {{end}}
+    {{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}</h4>
    +{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go new file mode 100644 index 000000000..d60819118 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package trace + +import "golang.org/x/net/context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go new file mode 100644 index 000000000..df6e1fba7 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package trace + +import "context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_test.go b/vendor/golang.org/x/net/trace/trace_test.go new file mode 100644 index 000000000..bfd9dfe94 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_test.go @@ -0,0 +1,178 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "net/http" + "reflect" + "testing" +) + +type s struct{} + +func (s) String() string { return "lazy string" } + +// TestReset checks whether all the fields are zeroed after reset. +func TestReset(t *testing.T) { + tr := New("foo", "bar") + tr.LazyLog(s{}, false) + tr.LazyPrintf("%d", 1) + tr.SetRecycler(func(_ interface{}) {}) + tr.SetTraceInfo(3, 4) + tr.SetMaxEvents(100) + tr.SetError() + tr.Finish() + + tr.(*trace).reset() + + if !reflect.DeepEqual(tr, new(trace)) { + t.Errorf("reset didn't clear all fields: %+v", tr) + } +} + +// TestResetLog checks whether all the fields are zeroed after reset. 
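// Editorial aside, not part of the vendored trace package or its tests: a
// minimal sketch of how the public API exercised by the tests below is
// typically used in an HTTP server. The handler path, trace family and listen
// address are arbitrary examples. At this vintage the package's init registers
// the /debug/requests and /debug/events pages (rendered with the pageHTML
// template above) on the default mux, and by default serves them only to
// loopback clients, as TestAuthRequest below checks.
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/trace"
)

func main() {
	http.HandleFunc("/hello", func(w http.ResponseWriter, req *http.Request) {
		tr := trace.New("example.Hello", req.URL.Path) // listed under /debug/requests while active
		defer tr.Finish()

		tr.LazyPrintf("handling %s %s", req.Method, req.URL.Path)
		if _, err := w.Write([]byte("hello")); err != nil {
			tr.LazyPrintf("write failed: %v", err)
			tr.SetError() // marks the trace as an error in the UI
		}
	})
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}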
+func TestResetLog(t *testing.T) { + el := NewEventLog("foo", "bar") + el.Printf("message") + el.Errorf("error") + el.Finish() + + el.(*eventLog).reset() + + if !reflect.DeepEqual(el, new(eventLog)) { + t.Errorf("reset didn't clear all fields: %+v", el) + } +} + +func TestAuthRequest(t *testing.T) { + testCases := []struct { + host string + want bool + }{ + {host: "192.168.23.1", want: false}, + {host: "192.168.23.1:8080", want: false}, + {host: "malformed remote addr", want: false}, + {host: "localhost", want: true}, + {host: "localhost:8080", want: true}, + {host: "127.0.0.1", want: true}, + {host: "127.0.0.1:8080", want: true}, + {host: "::1", want: true}, + {host: "[::1]:8080", want: true}, + } + for _, tt := range testCases { + req := &http.Request{RemoteAddr: tt.host} + any, sensitive := AuthRequest(req) + if any != tt.want || sensitive != tt.want { + t.Errorf("AuthRequest(%q) = %t, %t; want %t, %t", tt.host, any, sensitive, tt.want, tt.want) + } + } +} + +// TestParseTemplate checks that all templates used by this package are valid +// as they are parsed on first usage +func TestParseTemplate(t *testing.T) { + if tmpl := distTmpl(); tmpl == nil { + t.Error("invalid template returned from distTmpl()") + } + if tmpl := pageTmpl(); tmpl == nil { + t.Error("invalid template returned from pageTmpl()") + } + if tmpl := eventsTmpl(); tmpl == nil { + t.Error("invalid template returned from eventsTmpl()") + } +} + +func benchmarkTrace(b *testing.B, maxEvents, numEvents int) { + numSpans := (b.N + numEvents + 1) / numEvents + + for i := 0; i < numSpans; i++ { + tr := New("test", "test") + tr.SetMaxEvents(maxEvents) + for j := 0; j < numEvents; j++ { + tr.LazyPrintf("%d", j) + } + tr.Finish() + } +} + +func BenchmarkTrace_Default_2(b *testing.B) { + benchmarkTrace(b, 0, 2) +} + +func BenchmarkTrace_Default_10(b *testing.B) { + benchmarkTrace(b, 0, 10) +} + +func BenchmarkTrace_Default_100(b *testing.B) { + benchmarkTrace(b, 0, 100) +} + +func BenchmarkTrace_Default_1000(b *testing.B) { + benchmarkTrace(b, 0, 1000) +} + +func BenchmarkTrace_Default_10000(b *testing.B) { + benchmarkTrace(b, 0, 10000) +} + +func BenchmarkTrace_10_2(b *testing.B) { + benchmarkTrace(b, 10, 2) +} + +func BenchmarkTrace_10_10(b *testing.B) { + benchmarkTrace(b, 10, 10) +} + +func BenchmarkTrace_10_100(b *testing.B) { + benchmarkTrace(b, 10, 100) +} + +func BenchmarkTrace_10_1000(b *testing.B) { + benchmarkTrace(b, 10, 1000) +} + +func BenchmarkTrace_10_10000(b *testing.B) { + benchmarkTrace(b, 10, 10000) +} + +func BenchmarkTrace_100_2(b *testing.B) { + benchmarkTrace(b, 100, 2) +} + +func BenchmarkTrace_100_10(b *testing.B) { + benchmarkTrace(b, 100, 10) +} + +func BenchmarkTrace_100_100(b *testing.B) { + benchmarkTrace(b, 100, 100) +} + +func BenchmarkTrace_100_1000(b *testing.B) { + benchmarkTrace(b, 100, 1000) +} + +func BenchmarkTrace_100_10000(b *testing.B) { + benchmarkTrace(b, 100, 10000) +} + +func BenchmarkTrace_1000_2(b *testing.B) { + benchmarkTrace(b, 1000, 2) +} + +func BenchmarkTrace_1000_10(b *testing.B) { + benchmarkTrace(b, 1000, 10) +} + +func BenchmarkTrace_1000_100(b *testing.B) { + benchmarkTrace(b, 1000, 100) +} + +func BenchmarkTrace_1000_1000(b *testing.B) { + benchmarkTrace(b, 1000, 1000) +} + +func BenchmarkTrace_1000_10000(b *testing.B) { + benchmarkTrace(b, 1000, 10000) +} diff --git a/vendor/golang.org/x/net/webdav/file.go b/vendor/golang.org/x/net/webdav/file.go new file mode 100644 index 000000000..748118dd3 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file.go @@ -0,0 +1,796 @@ 
+// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "io" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "sync" + "time" + + "golang.org/x/net/context" +) + +// slashClean is equivalent to but slightly more efficient than +// path.Clean("/" + name). +func slashClean(name string) string { + if name == "" || name[0] != '/' { + name = "/" + name + } + return path.Clean(name) +} + +// A FileSystem implements access to a collection of named files. The elements +// in a file path are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +// +// Each method has the same semantics as the os package's function of the same +// name. +// +// Note that the os.Rename documentation says that "OS-specific restrictions +// might apply". In particular, whether or not renaming a file or directory +// overwriting another existing file or directory is an error is OS-dependent. +type FileSystem interface { + Mkdir(ctx context.Context, name string, perm os.FileMode) error + OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) + RemoveAll(ctx context.Context, name string) error + Rename(ctx context.Context, oldName, newName string) error + Stat(ctx context.Context, name string) (os.FileInfo, error) +} + +// A File is returned by a FileSystem's OpenFile method and can be served by a +// Handler. +// +// A File may optionally implement the DeadPropsHolder interface, if it can +// load and save dead properties. +type File interface { + http.File + io.Writer +} + +// A Dir implements FileSystem using the native file system restricted to a +// specific directory tree. +// +// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's +// string value is a filename on the native file system, not a URL, so it is +// separated by filepath.Separator, which isn't necessarily '/'. +// +// An empty Dir is treated as ".". +type Dir string + +func (d Dir) resolve(name string) string { + // This implementation is based on Dir.Open's code in the standard net/http package. + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return "" + } + dir := string(d) + if dir == "" { + dir = "." + } + return filepath.Join(dir, filepath.FromSlash(slashClean(name))) +} + +func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + return os.Mkdir(name, perm) +} + +func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + f, err := os.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + return f, nil +} + +func (d Dir) RemoveAll(ctx context.Context, name string) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + if name == filepath.Clean(string(d)) { + // Prohibit removing the virtual root directory. 
+ return os.ErrInvalid + } + return os.RemoveAll(name) +} + +func (d Dir) Rename(ctx context.Context, oldName, newName string) error { + if oldName = d.resolve(oldName); oldName == "" { + return os.ErrNotExist + } + if newName = d.resolve(newName); newName == "" { + return os.ErrNotExist + } + if root := filepath.Clean(string(d)); root == oldName || root == newName { + // Prohibit renaming from or to the virtual root directory. + return os.ErrInvalid + } + return os.Rename(oldName, newName) +} + +func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + return os.Stat(name) +} + +// NewMemFS returns a new in-memory FileSystem implementation. +func NewMemFS() FileSystem { + return &memFS{ + root: memFSNode{ + children: make(map[string]*memFSNode), + mode: 0660 | os.ModeDir, + modTime: time.Now(), + }, + } +} + +// A memFS implements FileSystem, storing all metadata and actual file data +// in-memory. No limits on filesystem size are used, so it is not recommended +// this be used where the clients are untrusted. +// +// Concurrent access is permitted. The tree structure is protected by a mutex, +// and each node's contents and metadata are protected by a per-node mutex. +// +// TODO: Enforce file permissions. +type memFS struct { + mu sync.Mutex + root memFSNode +} + +// TODO: clean up and rationalize the walk/find code. + +// walk walks the directory tree for the fullname, calling f at each step. If f +// returns an error, the walk will be aborted and return that same error. +// +// dir is the directory at that step, frag is the name fragment, and final is +// whether it is the final step. For example, walking "/foo/bar/x" will result +// in 3 calls to f: +// - "/", "foo", false +// - "/foo/", "bar", false +// - "/foo/bar/", "x", true +// The frag argument will be empty only if dir is the root node and the walk +// ends at that root node. +func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error { + original := fullname + fullname = slashClean(fullname) + + // Strip any leading "/"s to make fullname a relative path, as the walk + // starts at fs.root. + if fullname[0] == '/' { + fullname = fullname[1:] + } + dir := &fs.root + + for { + frag, remaining := fullname, "" + i := strings.IndexRune(fullname, '/') + final := i < 0 + if !final { + frag, remaining = fullname[:i], fullname[i+1:] + } + if frag == "" && dir != &fs.root { + panic("webdav: empty path fragment for a clean path") + } + if err := f(dir, frag, final); err != nil { + return &os.PathError{ + Op: op, + Path: original, + Err: err, + } + } + if final { + break + } + child := dir.children[frag] + if child == nil { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrNotExist, + } + } + if !child.mode.IsDir() { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrInvalid, + } + } + dir, fullname = child, remaining + } + return nil +} + +// find returns the parent of the named node and the relative name fragment +// from the parent to the child. For example, if finding "/foo/bar/baz" then +// parent will be the node for "/foo/bar" and frag will be "baz". +// +// If the fullname names the root node, then parent, frag and err will be zero. +// +// find returns an error if the parent does not already exist or the parent +// isn't a directory, but it will not return an error per se if the child does +// not already exist. 
The error returned is either nil or an *os.PathError +// whose Op is op. +func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) { + err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error { + if !final { + return nil + } + if frag0 != "" { + parent, frag = parent0, frag0 + } + return nil + }) + return parent, frag, err +} + +func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("mkdir", name) + if err != nil { + return err + } + if dir == nil { + // We can't create the root. + return os.ErrInvalid + } + if _, ok := dir.children[frag]; ok { + return os.ErrExist + } + dir.children[frag] = &memFSNode{ + children: make(map[string]*memFSNode), + mode: perm.Perm() | os.ModeDir, + modTime: time.Now(), + } + return nil +} + +func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("open", name) + if err != nil { + return nil, err + } + var n *memFSNode + if dir == nil { + // We're opening the root. + if flag&(os.O_WRONLY|os.O_RDWR) != 0 { + return nil, os.ErrPermission + } + n, frag = &fs.root, "/" + + } else { + n = dir.children[frag] + if flag&(os.O_SYNC|os.O_APPEND) != 0 { + // memFile doesn't support these flags yet. + return nil, os.ErrInvalid + } + if flag&os.O_CREATE != 0 { + if flag&os.O_EXCL != 0 && n != nil { + return nil, os.ErrExist + } + if n == nil { + n = &memFSNode{ + mode: perm.Perm(), + } + dir.children[frag] = n + } + } + if n == nil { + return nil, os.ErrNotExist + } + if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 { + n.mu.Lock() + n.data = nil + n.mu.Unlock() + } + } + + children := make([]os.FileInfo, 0, len(n.children)) + for cName, c := range n.children { + children = append(children, c.stat(cName)) + } + return &memFile{ + n: n, + nameSnapshot: frag, + childrenSnapshot: children, + }, nil +} + +func (fs *memFS) RemoveAll(ctx context.Context, name string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("remove", name) + if err != nil { + return err + } + if dir == nil { + // We can't remove the root. + return os.ErrInvalid + } + delete(dir.children, frag) + return nil +} + +func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + oldName = slashClean(oldName) + newName = slashClean(newName) + if oldName == newName { + return nil + } + if strings.HasPrefix(newName, oldName+"/") { + // We can't rename oldName to be a sub-directory of itself. + return os.ErrInvalid + } + + oDir, oFrag, err := fs.find("rename", oldName) + if err != nil { + return err + } + if oDir == nil { + // We can't rename from the root. + return os.ErrInvalid + } + + nDir, nFrag, err := fs.find("rename", newName) + if err != nil { + return err + } + if nDir == nil { + // We can't rename to the root. 
+ return os.ErrInvalid + } + + oNode, ok := oDir.children[oFrag] + if !ok { + return os.ErrNotExist + } + if oNode.children != nil { + if nNode, ok := nDir.children[nFrag]; ok { + if nNode.children == nil { + return errNotADirectory + } + if len(nNode.children) != 0 { + return errDirectoryNotEmpty + } + } + } + delete(oDir.children, oFrag) + nDir.children[nFrag] = oNode + return nil +} + +func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("stat", name) + if err != nil { + return nil, err + } + if dir == nil { + // We're stat'ting the root. + return fs.root.stat("/"), nil + } + if n, ok := dir.children[frag]; ok { + return n.stat(path.Base(name)), nil + } + return nil, os.ErrNotExist +} + +// A memFSNode represents a single entry in the in-memory filesystem and also +// implements os.FileInfo. +type memFSNode struct { + // children is protected by memFS.mu. + children map[string]*memFSNode + + mu sync.Mutex + data []byte + mode os.FileMode + modTime time.Time + deadProps map[xml.Name]Property +} + +func (n *memFSNode) stat(name string) *memFileInfo { + n.mu.Lock() + defer n.mu.Unlock() + return &memFileInfo{ + name: name, + size: int64(len(n.data)), + mode: n.mode, + modTime: n.modTime, + } +} + +func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) { + n.mu.Lock() + defer n.mu.Unlock() + if len(n.deadProps) == 0 { + return nil, nil + } + ret := make(map[xml.Name]Property, len(n.deadProps)) + for k, v := range n.deadProps { + ret[k] = v + } + return ret, nil +} + +func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) { + n.mu.Lock() + defer n.mu.Unlock() + pstat := Propstat{Status: http.StatusOK} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + if patch.Remove { + delete(n.deadProps, p.XMLName) + continue + } + if n.deadProps == nil { + n.deadProps = map[xml.Name]Property{} + } + n.deadProps[p.XMLName] = p + } + } + return []Propstat{pstat}, nil +} + +type memFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (f *memFileInfo) Name() string { return f.name } +func (f *memFileInfo) Size() int64 { return f.size } +func (f *memFileInfo) Mode() os.FileMode { return f.mode } +func (f *memFileInfo) ModTime() time.Time { return f.modTime } +func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() } +func (f *memFileInfo) Sys() interface{} { return nil } + +// A memFile is a File implementation for a memFSNode. It is a per-file (not +// per-node) read/write position, and a snapshot of the memFS' tree structure +// (a node's name and children) for that node. +type memFile struct { + n *memFSNode + nameSnapshot string + childrenSnapshot []os.FileInfo + // pos is protected by n.mu. + pos int +} + +// A *memFile implements the optional DeadPropsHolder interface. 
+var _ DeadPropsHolder = (*memFile)(nil) + +func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() } +func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) } + +func (f *memFile) Close() error { + return nil +} + +func (f *memFile) Read(p []byte) (int, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos >= len(f.n.data) { + return 0, io.EOF + } + n := copy(p, f.n.data[f.pos:]) + f.pos += n + return n, nil +} + +func (f *memFile) Readdir(count int) ([]os.FileInfo, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if !f.n.mode.IsDir() { + return nil, os.ErrInvalid + } + old := f.pos + if old >= len(f.childrenSnapshot) { + // The os.File Readdir docs say that at the end of a directory, + // the error is io.EOF if count > 0 and nil if count <= 0. + if count > 0 { + return nil, io.EOF + } + return nil, nil + } + if count > 0 { + f.pos += count + if f.pos > len(f.childrenSnapshot) { + f.pos = len(f.childrenSnapshot) + } + } else { + f.pos = len(f.childrenSnapshot) + old = 0 + } + return f.childrenSnapshot[old:f.pos], nil +} + +func (f *memFile) Seek(offset int64, whence int) (int64, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + npos := f.pos + // TODO: How to handle offsets greater than the size of system int? + switch whence { + case os.SEEK_SET: + npos = int(offset) + case os.SEEK_CUR: + npos += int(offset) + case os.SEEK_END: + npos = len(f.n.data) + int(offset) + default: + npos = -1 + } + if npos < 0 { + return 0, os.ErrInvalid + } + f.pos = npos + return int64(f.pos), nil +} + +func (f *memFile) Stat() (os.FileInfo, error) { + return f.n.stat(f.nameSnapshot), nil +} + +func (f *memFile) Write(p []byte) (int, error) { + lenp := len(p) + f.n.mu.Lock() + defer f.n.mu.Unlock() + + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos < len(f.n.data) { + n := copy(f.n.data[f.pos:], p) + f.pos += n + p = p[n:] + } else if f.pos > len(f.n.data) { + // Write permits the creation of holes, if we've seek'ed past the + // existing end of file. + if f.pos <= cap(f.n.data) { + oldLen := len(f.n.data) + f.n.data = f.n.data[:f.pos] + hole := f.n.data[oldLen:] + for i := range hole { + hole[i] = 0 + } + } else { + d := make([]byte, f.pos, f.pos+len(p)) + copy(d, f.n.data) + f.n.data = d + } + } + + if len(p) > 0 { + // We should only get here if f.pos == len(f.n.data). + f.n.data = append(f.n.data, p...) + f.pos = len(f.n.data) + } + f.n.modTime = time.Now() + return lenp, nil +} + +// moveFiles moves files and/or directories from src to dst. +// +// See section 9.9.4 for when various HTTP status codes apply. +func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) { + created := false + if _, err := fs.Stat(ctx, dst); err != nil { + if !os.IsNotExist(err) { + return http.StatusForbidden, err + } + created = true + } else if overwrite { + // Section 9.9.3 says that "If a resource exists at the destination + // and the Overwrite header is "T", then prior to performing the move, + // the server must perform a DELETE with "Depth: infinity" on the + // destination resource. 
+ if err := fs.RemoveAll(ctx, dst); err != nil { + return http.StatusForbidden, err + } + } else { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.Rename(ctx, src, dst); err != nil { + return http.StatusForbidden, err + } + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +func copyProps(dst, src File) error { + d, ok := dst.(DeadPropsHolder) + if !ok { + return nil + } + s, ok := src.(DeadPropsHolder) + if !ok { + return nil + } + m, err := s.DeadProps() + if err != nil { + return err + } + props := make([]Property, 0, len(m)) + for _, prop := range m { + props = append(props, prop) + } + _, err = d.Patch([]Proppatch{{Props: props}}) + return err +} + +// copyFiles copies files and/or directories from src to dst. +// +// See section 9.8.5 for when various HTTP status codes apply. +func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) { + if recursion == 1000 { + return http.StatusInternalServerError, errRecursionTooDeep + } + recursion++ + + // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/ + // into /A/B/ could lead to infinite recursion if not handled correctly." + + srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + defer srcFile.Close() + srcStat, err := srcFile.Stat() + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + srcPerm := srcStat.Mode() & os.ModePerm + + created := false + if _, err := fs.Stat(ctx, dst); err != nil { + if os.IsNotExist(err) { + created = true + } else { + return http.StatusForbidden, err + } + } else { + if !overwrite { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) { + return http.StatusForbidden, err + } + } + + if srcStat.IsDir() { + if err := fs.Mkdir(ctx, dst, srcPerm); err != nil { + return http.StatusForbidden, err + } + if depth == infiniteDepth { + children, err := srcFile.Readdir(-1) + if err != nil { + return http.StatusForbidden, err + } + for _, c := range children { + name := c.Name() + s := path.Join(src, name) + d := path.Join(dst, name) + cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion) + if cErr != nil { + // TODO: MultiStatus. + return cStatus, cErr + } + } + } + + } else { + dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm) + if err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusForbidden, err + + } + _, copyErr := io.Copy(dstFile, srcFile) + propsErr := copyProps(dstFile, srcFile) + closeErr := dstFile.Close() + if copyErr != nil { + return http.StatusInternalServerError, copyErr + } + if propsErr != nil { + return http.StatusInternalServerError, propsErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + } + + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +// walkFS traverses filesystem fs starting at name up to depth levels. +// +// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node, +// walkFS calls walkFn. If a visited file system node is a directory and +// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node. 
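// Editorial aside, not part of the vendored webdav package: a minimal sketch of
// wiring the FileSystem implementations defined in this file into the package's
// HTTP handler. The "/dav/" prefix and listen address are arbitrary examples;
// Handler and NewMemLS are exported elsewhere in this same package.
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/webdav"
)

func main() {
	h := &webdav.Handler{
		Prefix:     "/dav/",
		FileSystem: webdav.NewMemFS(), // in-memory FileSystem from file.go; webdav.Dir("/some/root") also satisfies the interface
		LockSystem: webdav.NewMemLS(), // in-memory lock manager
		Logger: func(r *http.Request, err error) {
			if err != nil {
				log.Printf("webdav: %s %s: %v", r.Method, r.URL.Path, err)
			}
		},
	}
	http.Handle("/dav/", h)
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}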
+func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error { + // This implementation is based on Walk's code in the standard path/filepath package. + err := walkFn(name, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + if !info.IsDir() || depth == 0 { + return nil + } + if depth == 1 { + depth = 0 + } + + // Read directory names. + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return walkFn(name, info, err) + } + fileInfos, err := f.Readdir(0) + f.Close() + if err != nil { + return walkFn(name, info, err) + } + + for _, fileInfo := range fileInfos { + filename := path.Join(name, fileInfo.Name()) + fileInfo, err := fs.Stat(ctx, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/webdav/file_go1.6.go b/vendor/golang.org/x/net/webdav/file_go1.6.go new file mode 100644 index 000000000..fa387700d --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_go1.6.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package webdav + +import ( + "net/http" + + "golang.org/x/net/context" +) + +func getContext(r *http.Request) context.Context { + return context.Background() +} diff --git a/vendor/golang.org/x/net/webdav/file_go1.7.go b/vendor/golang.org/x/net/webdav/file_go1.7.go new file mode 100644 index 000000000..d1c3de832 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_go1.7.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package webdav + +import ( + "context" + "net/http" +) + +func getContext(r *http.Request) context.Context { + return r.Context() +} diff --git a/vendor/golang.org/x/net/webdav/file_test.go b/vendor/golang.org/x/net/webdav/file_test.go new file mode 100644 index 000000000..bfd96e193 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_test.go @@ -0,0 +1,1184 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package webdav + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSlashClean(t *testing.T) { + testCases := []string{ + "", + ".", + "/", + "/./", + "//", + "//.", + "//a", + "/a", + "/a/b/c", + "/a//b/./../c/d/", + "a", + "a/b/c", + } + for _, tc := range testCases { + got := slashClean(tc) + want := path.Clean("/" + tc) + if got != want { + t.Errorf("tc=%q: got %q, want %q", tc, got, want) + } + } +} + +func TestDirResolve(t *testing.T) { + testCases := []struct { + dir, name, want string + }{ + {"/", "", "/"}, + {"/", "/", "/"}, + {"/", ".", "/"}, + {"/", "./a", "/a"}, + {"/", "..", "/"}, + {"/", "..", "/"}, + {"/", "../", "/"}, + {"/", "../.", "/"}, + {"/", "../a", "/a"}, + {"/", "../..", "/"}, + {"/", "../bar/a", "/bar/a"}, + {"/", "../baz/a", "/baz/a"}, + {"/", "...", "/..."}, + {"/", ".../a", "/.../a"}, + {"/", ".../..", "/"}, + {"/", "a", "/a"}, + {"/", "a/./b", "/a/b"}, + {"/", "a/../../b", "/b"}, + {"/", "a/../b", "/b"}, + {"/", "a/b", "/a/b"}, + {"/", "a/b/c/../../d", "/a/d"}, + {"/", "a/b/c/../../../d", "/d"}, + {"/", "a/b/c/../../../../d", "/d"}, + {"/", "a/b/c/d", "/a/b/c/d"}, + + {"/foo/bar", "", "/foo/bar"}, + {"/foo/bar", "/", "/foo/bar"}, + {"/foo/bar", ".", "/foo/bar"}, + {"/foo/bar", "./a", "/foo/bar/a"}, + {"/foo/bar", "..", "/foo/bar"}, + {"/foo/bar", "../", "/foo/bar"}, + {"/foo/bar", "../.", "/foo/bar"}, + {"/foo/bar", "../a", "/foo/bar/a"}, + {"/foo/bar", "../..", "/foo/bar"}, + {"/foo/bar", "../bar/a", "/foo/bar/bar/a"}, + {"/foo/bar", "../baz/a", "/foo/bar/baz/a"}, + {"/foo/bar", "...", "/foo/bar/..."}, + {"/foo/bar", ".../a", "/foo/bar/.../a"}, + {"/foo/bar", ".../..", "/foo/bar"}, + {"/foo/bar", "a", "/foo/bar/a"}, + {"/foo/bar", "a/./b", "/foo/bar/a/b"}, + {"/foo/bar", "a/../../b", "/foo/bar/b"}, + {"/foo/bar", "a/../b", "/foo/bar/b"}, + {"/foo/bar", "a/b", "/foo/bar/a/b"}, + {"/foo/bar", "a/b/c/../../d", "/foo/bar/a/d"}, + {"/foo/bar", "a/b/c/../../../d", "/foo/bar/d"}, + {"/foo/bar", "a/b/c/../../../../d", "/foo/bar/d"}, + {"/foo/bar", "a/b/c/d", "/foo/bar/a/b/c/d"}, + + {"/foo/bar/", "", "/foo/bar"}, + {"/foo/bar/", "/", "/foo/bar"}, + {"/foo/bar/", ".", "/foo/bar"}, + {"/foo/bar/", "./a", "/foo/bar/a"}, + {"/foo/bar/", "..", "/foo/bar"}, + + {"/foo//bar///", "", "/foo/bar"}, + {"/foo//bar///", "/", "/foo/bar"}, + {"/foo//bar///", ".", "/foo/bar"}, + {"/foo//bar///", "./a", "/foo/bar/a"}, + {"/foo//bar///", "..", "/foo/bar"}, + + {"/x/y/z", "ab/c\x00d/ef", ""}, + + {".", "", "."}, + {".", "/", "."}, + {".", ".", "."}, + {".", "./a", "a"}, + {".", "..", "."}, + {".", "..", "."}, + {".", "../", "."}, + {".", "../.", "."}, + {".", "../a", "a"}, + {".", "../..", "."}, + {".", "../bar/a", "bar/a"}, + {".", "../baz/a", "baz/a"}, + {".", "...", "..."}, + {".", ".../a", ".../a"}, + {".", ".../..", "."}, + {".", "a", "a"}, + {".", "a/./b", "a/b"}, + {".", "a/../../b", "b"}, + {".", "a/../b", "b"}, + {".", "a/b", "a/b"}, + {".", "a/b/c/../../d", "a/d"}, + {".", "a/b/c/../../../d", "d"}, + {".", "a/b/c/../../../../d", "d"}, + {".", "a/b/c/d", "a/b/c/d"}, + + {"", "", "."}, + {"", "/", "."}, + {"", ".", "."}, + {"", "./a", "a"}, + {"", "..", "."}, + } + + for _, tc := range testCases { + d := Dir(filepath.FromSlash(tc.dir)) + if got := filepath.ToSlash(d.resolve(tc.name)); got != tc.want { + t.Errorf("dir=%q, name=%q: got %q, want %q", tc.dir, tc.name, got, tc.want) + } + } +} + +func TestWalk(t *testing.T) { + 
type walkStep struct { + name, frag string + final bool + } + + testCases := []struct { + dir string + want []walkStep + }{ + {"", []walkStep{ + {"", "", true}, + }}, + {"/", []walkStep{ + {"", "", true}, + }}, + {"/a", []walkStep{ + {"", "a", true}, + }}, + {"/a/", []walkStep{ + {"", "a", true}, + }}, + {"/a/b", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/c", []walkStep{ + {"", "a", false}, + {"a", "b", false}, + {"b", "c", true}, + }}, + // The following test case is the one mentioned explicitly + // in the method description. + {"/foo/bar/x", []walkStep{ + {"", "foo", false}, + {"foo", "bar", false}, + {"bar", "x", true}, + }}, + } + + ctx := context.Background() + + for _, tc := range testCases { + fs := NewMemFS().(*memFS) + + parts := strings.Split(tc.dir, "/") + for p := 2; p < len(parts); p++ { + d := strings.Join(parts[:p], "/") + if err := fs.Mkdir(ctx, d, 0666); err != nil { + t.Errorf("tc.dir=%q: mkdir: %q: %v", tc.dir, d, err) + } + } + + i, prevFrag := 0, "" + err := fs.walk("test", tc.dir, func(dir *memFSNode, frag string, final bool) error { + got := walkStep{ + name: prevFrag, + frag: frag, + final: final, + } + want := tc.want[i] + + if got != want { + return fmt.Errorf("got %+v, want %+v", got, want) + } + i, prevFrag = i+1, frag + return nil + }) + if err != nil { + t.Errorf("tc.dir=%q: %v", tc.dir, err) + } + } +} + +// find appends to ss the names of the named file and its children. It is +// analogous to the Unix find command. +// +// The returned strings are not guaranteed to be in any particular order. +func find(ctx context.Context, ss []string, fs FileSystem, name string) ([]string, error) { + stat, err := fs.Stat(ctx, name) + if err != nil { + return nil, err + } + ss = append(ss, name) + if stat.IsDir() { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + return nil, err + } + for _, c := range children { + ss, err = find(ctx, ss, fs, path.Join(name, c.Name())) + if err != nil { + return nil, err + } + } + } + return ss, nil +} + +func testFS(t *testing.T, fs FileSystem) { + errStr := func(err error) string { + switch { + case os.IsExist(err): + return "errExist" + case os.IsNotExist(err): + return "errNotExist" + case err != nil: + return "err" + } + return "ok" + } + + // The non-"find" non-"stat" test cases should change the file system state. The + // indentation of the "find"s and "stat"s helps distinguish such test cases. 
+ testCases := []string{ + " stat / want dir", + " stat /a want errNotExist", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + "create /a A want ok", + " stat /a want 1", + "create /d/e EEE want errNotExist", + "mk-dir /a want errExist", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + " stat /d want dir", + "create /d/e EEE want ok", + " stat /d/e want 3", + " find / /a /d /d/e", + "create /d/f FFFF want ok", + "create /d/g GGGGGGG want ok", + "mk-dir /d/m want ok", + "mk-dir /d/m want errExist", + "create /d/m/p PPPPP want ok", + " stat /d/e want 3", + " stat /d/f want 4", + " stat /d/g want 7", + " stat /d/h want errNotExist", + " stat /d/m want dir", + " stat /d/m/p want 5", + " find / /a /d /d/e /d/f /d/g /d/m /d/m/p", + "rm-all /d want ok", + " stat /a want 1", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + " stat /d/f want errNotExist", + " stat /d/g want errNotExist", + " stat /d/m want errNotExist", + " stat /d/m/p want errNotExist", + " find / /a", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + "create /d/f FFFF want ok", + "rm-all /d/f want ok", + "mk-dir /d/m want ok", + "rm-all /z want ok", + "rm-all / want err", + "create /b BB want ok", + " stat / want dir", + " stat /a want 1", + " stat /b want 2", + " stat /c want errNotExist", + " stat /d want dir", + " stat /d/m want dir", + " find / /a /b /d /d/m", + "move__ o=F /b /c want ok", + " stat /b want errNotExist", + " stat /c want 2", + " stat /d/m want dir", + " stat /d/n want errNotExist", + " find / /a /c /d /d/m", + "move__ o=F /d/m /d/n want ok", + "create /d/n/q QQQQ want ok", + " stat /d/m want errNotExist", + " stat /d/n want dir", + " stat /d/n/q want 4", + "move__ o=F /d /d/n/z want err", + "move__ o=T /c /d/n/q want ok", + " stat /c want errNotExist", + " stat /d/n/q want 2", + " find / /a /d /d/n /d/n/q", + "create /d/n/r RRRRR want ok", + "mk-dir /u want ok", + "mk-dir /u/v want ok", + "move__ o=F /d/n /u want errExist", + "create /t TTTTTT want ok", + "move__ o=F /d/n /t want errExist", + "rm-all /t want ok", + "move__ o=F /d/n /t want ok", + " stat /d want dir", + " stat /d/n want errNotExist", + " stat /d/n/r want errNotExist", + " stat /t want dir", + " stat /t/q want 2", + " stat /t/r want 5", + " find / /a /d /t /t/q /t/r /u /u/v", + "move__ o=F /t / want errExist", + "move__ o=T /t /u/v want ok", + " stat /u/v/r want 5", + "move__ o=F / /z want err", + " find / /a /d /u /u/v /u/v/q /u/v/r", + " stat /a want 1", + " stat /b want errNotExist", + " stat /c want errNotExist", + " stat /u/v/r want 5", + "copy__ o=F d=0 /a /b want ok", + "copy__ o=T d=0 /a /c want ok", + " stat /a want 1", + " stat /b want 1", + " stat /c want 1", + " stat /u/v/r want 5", + "copy__ o=F d=0 /u/v/r /b want errExist", + " stat /b want 1", + "copy__ o=T d=0 /u/v/r /b want ok", + " stat /a want 1", + " stat /b want 5", + " stat /u/v/r want 5", + "rm-all /a want ok", + "rm-all /b want ok", + "mk-dir /u/v/w want ok", + "create /u/v/w/s SSSSSSSS want ok", + " stat /d want dir", + " stat /d/x want errNotExist", + " stat /d/y want errNotExist", + " stat /u/v/r want 5", + " stat /u/v/w/s want 8", + " find / /c /d /u /u/v /u/v/q /u/v/r /u/v/w /u/v/w/s", + "copy__ o=T d=0 /u/v /d/x want ok", + "copy__ o=T d=∞ /u/v /d/y want ok", + "rm-all /u want ok", + " stat /d/x want dir", + " stat /d/x/q want errNotExist", + " stat /d/x/r want errNotExist", + " stat /d/x/w want errNotExist", + " stat /d/x/w/s want errNotExist", + " stat /d/y want dir", + " stat /d/y/q want 2", + " stat /d/y/r want 
5", + " stat /d/y/w want dir", + " stat /d/y/w/s want 8", + " stat /u want errNotExist", + " find / /c /d /d/x /d/y /d/y/q /d/y/r /d/y/w /d/y/w/s", + "copy__ o=F d=∞ /d/y /d/x want errExist", + } + + ctx := context.Background() + + for i, tc := range testCases { + tc = strings.TrimSpace(tc) + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create": + parts := strings.Split(arg, " ") + if len(parts) != 4 || parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid write", i, tc) + } + f, opErr := fs.OpenFile(ctx, parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if got := errStr(opErr); got != parts[3] { + t.Fatalf("test case #%d %q: OpenFile: got %q (%v), want %q", i, tc, got, opErr, parts[3]) + } + if f != nil { + if _, err := f.Write([]byte(parts[1])); err != nil { + t.Fatalf("test case #%d %q: Write: %v", i, tc, err) + } + if err := f.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + } + + case "find": + got, err := find(ctx, nil, fs, "/") + if err != nil { + t.Fatalf("test case #%d %q: find: %v", i, tc, err) + } + sort.Strings(got) + want := strings.Split(arg, " ") + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %s\nwant %s", i, tc, got, want) + } + + case "copy__", "mk-dir", "move__", "rm-all", "stat": + nParts := 3 + switch op { + case "copy__": + nParts = 6 + case "move__": + nParts = 5 + } + parts := strings.Split(arg, " ") + if len(parts) != nParts { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + + got, opErr := "", error(nil) + switch op { + case "copy__": + depth := 0 + if parts[1] == "d=∞" { + depth = infiniteDepth + } + _, opErr = copyFiles(ctx, fs, parts[2], parts[3], parts[0] == "o=T", depth, 0) + case "mk-dir": + opErr = fs.Mkdir(ctx, parts[0], 0777) + case "move__": + _, opErr = moveFiles(ctx, fs, parts[1], parts[2], parts[0] == "o=T") + case "rm-all": + opErr = fs.RemoveAll(ctx, parts[0]) + case "stat": + var stat os.FileInfo + fileName := parts[0] + if stat, opErr = fs.Stat(ctx, fileName); opErr == nil { + if stat.IsDir() { + got = "dir" + } else { + got = strconv.Itoa(int(stat.Size())) + } + + if fileName == "/" { + // For a Dir FileSystem, the virtual file system root maps to a + // real file system name like "/tmp/webdav-test012345", which does + // not end with "/". We skip such cases. 
+ } else if statName := stat.Name(); path.Base(fileName) != statName { + t.Fatalf("test case #%d %q: file name %q inconsistent with stat name %q", + i, tc, fileName, statName) + } + } + } + if got == "" { + got = errStr(opErr) + } + + if parts[len(parts)-2] != "want" { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + if want := parts[len(parts)-1]; got != want { + t.Fatalf("test case #%d %q: got %q (%v), want %q", i, tc, got, opErr, want) + } + } + } +} + +func TestDir(t *testing.T) { + switch runtime.GOOS { + case "nacl": + t.Skip("see golang.org/issue/12004") + case "plan9": + t.Skip("see golang.org/issue/11453") + } + + td, err := ioutil.TempDir("", "webdav-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + testFS(t, Dir(td)) +} + +func TestMemFS(t *testing.T) { + testFS(t, NewMemFS()) +} + +func TestMemFSRoot(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + for i := 0; i < 5; i++ { + stat, err := fs.Stat(ctx, "/") + if err != nil { + t.Fatalf("i=%d: Stat: %v", i, err) + } + if !stat.IsDir() { + t.Fatalf("i=%d: Stat.IsDir is false, want true", i) + } + + f, err := fs.OpenFile(ctx, "/", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("i=%d: OpenFile: %v", i, err) + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + t.Fatalf("i=%d: Readdir: %v", i, err) + } + if len(children) != i { + t.Fatalf("i=%d: got %d children, want %d", i, len(children), i) + } + + if _, err := f.Write(make([]byte, 1)); err == nil { + t.Fatalf("i=%d: Write: got nil error, want non-nil", i) + } + + if err := fs.Mkdir(ctx, fmt.Sprintf("/dir%d", i), 0777); err != nil { + t.Fatalf("i=%d: Mkdir: %v", i, err) + } + } +} + +func TestMemFileReaddir(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + if err := fs.Mkdir(ctx, "/foo", 0777); err != nil { + t.Fatalf("Mkdir: %v", err) + } + readdir := func(count int) ([]os.FileInfo, error) { + f, err := fs.OpenFile(ctx, "/foo", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + return f.Readdir(count) + } + if got, err := readdir(-1); len(got) != 0 || err != nil { + t.Fatalf("readdir(-1): got %d fileInfos with err=%v, want 0, ", len(got), err) + } + if got, err := readdir(+1); len(got) != 0 || err != io.EOF { + t.Fatalf("readdir(+1): got %d fileInfos with err=%v, want 0, EOF", len(got), err) + } +} + +func TestMemFile(t *testing.T) { + testCases := []string{ + "wantData ", + "wantSize 0", + "write abc", + "wantData abc", + "write de", + "wantData abcde", + "wantSize 5", + "write 5*x", + "write 4*y+2*z", + "write 3*st", + "wantData abcdexxxxxyyyyzzststst", + "wantSize 22", + "seek set 4 want 4", + "write EFG", + "wantData abcdEFGxxxyyyyzzststst", + "wantSize 22", + "seek set 2 want 2", + "read cdEF", + "read Gx", + "seek cur 0 want 8", + "seek cur 2 want 10", + "seek cur -1 want 9", + "write J", + "wantData abcdEFGxxJyyyyzzststst", + "wantSize 22", + "seek cur -4 want 6", + "write ghijk", + "wantData abcdEFghijkyyyzzststst", + "wantSize 22", + "read yyyz", + "seek cur 0 want 15", + "write ", + "seek cur 0 want 15", + "read ", + "seek cur 0 want 15", + "seek end -3 want 19", + "write ZZ", + "wantData abcdEFghijkyyyzzstsZZt", + "wantSize 22", + "write 4*A", + "wantData abcdEFghijkyyyzzstsZZAAAA", + "wantSize 25", + "seek end 0 want 25", + "seek end -5 want 20", + "read Z+4*A", + "write 5*B", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB", + "wantSize 30", + "seek end 10 want 40", + "write C", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C", + "wantSize 41", 
+ "write D", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD", + "wantSize 42", + "seek set 43 want 43", + "write E", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E", + "wantSize 44", + "seek set 0 want 0", + "write 5*123456789_", + "wantData 123456789_123456789_123456789_123456789_123456789_", + "wantSize 50", + "seek cur 0 want 50", + "seek cur -99 want err", + } + + ctx := context.Background() + + const filename = "/foo" + fs := NewMemFS() + f, err := fs.OpenFile(ctx, filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + // Expand an arg like "3*a+2*b" to "aaabb". + parts := strings.Split(arg, "+") + for j, part := range parts { + if k := strings.IndexByte(part, '*'); k >= 0 { + repeatCount, repeatStr := part[:k], part[k+1:] + n, err := strconv.Atoi(repeatCount) + if err != nil { + t.Fatalf("test case #%d %q: invalid repeat count %q", i, tc, repeatCount) + } + parts[j] = strings.Repeat(repeatStr, n) + } + } + arg = strings.Join(parts, "") + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "read": + buf := make([]byte, len(arg)) + if _, err := io.ReadFull(f, buf); err != nil { + t.Fatalf("test case #%d %q: ReadFull: %v", i, tc, err) + } + if got := string(buf); got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + + case "seek": + parts := strings.Split(arg, " ") + if len(parts) != 4 { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + + whence := 0 + switch parts[0] { + default: + t.Fatalf("test case #%d %q: invalid seek whence", i, tc) + case "set": + whence = os.SEEK_SET + case "cur": + whence = os.SEEK_CUR + case "end": + whence = os.SEEK_END + } + offset, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid offset %q", i, tc, parts[1]) + } + + if parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + if parts[3] == "err" { + _, err := f.Seek(int64(offset), whence) + if err == nil { + t.Fatalf("test case #%d %q: Seek returned nil error, want non-nil", i, tc) + } + } else { + got, err := f.Seek(int64(offset), whence) + if err != nil { + t.Fatalf("test case #%d %q: Seek: %v", i, tc, err) + } + want, err := strconv.Atoi(parts[3]) + if err != nil { + t.Fatalf("test case #%d %q: invalid want %q", i, tc, parts[3]) + } + if got != int64(want) { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + + case "write": + n, err := f.Write([]byte(arg)) + if err != nil { + t.Fatalf("test case #%d %q: write: %v", i, tc, err) + } + if n != len(arg) { + t.Fatalf("test case #%d %q: write returned %d bytes, want %d", i, tc, n, len(arg)) + } + + case "wantData": + g, err := fs.OpenFile(ctx, filename, os.O_RDONLY, 0666) + if err != nil { + t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err) + } + gotBytes, err := ioutil.ReadAll(g) + if err != nil { + t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err) + } + for i, c := range gotBytes { + if c == '\x00' { + gotBytes[i] = '.' 
+ } + } + got := string(gotBytes) + if got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + if err := g.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + + case "wantSize": + n, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg) + } + fi, err := fs.Stat(ctx, filename) + if err != nil { + t.Fatalf("test case #%d %q: Stat: %v", i, tc, err) + } + if got, want := fi.Size(), int64(n); got != want { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + } +} + +// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a +// memFile doesn't allocate a new buffer for each of those N times. Otherwise, +// calling io.Copy(aMemFile, src) is likely to have quadratic complexity. +func TestMemFileWriteAllocs(t *testing.T) { + if runtime.Compiler == "gccgo" { + t.Skip("gccgo allocates here") + } + ctx := context.Background() + fs := NewMemFS() + f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + a := testing.AllocsPerRun(100, func() { + f.Write(xxx) + }) + // AllocsPerRun returns an integral value, so we compare the rounded-down + // number to zero. + if a > 0 { + t.Fatalf("%v allocs per run, want 0", a) + } +} + +func BenchmarkMemFileWrite(b *testing.B) { + ctx := context.Background() + fs := NewMemFS() + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + b.Fatalf("OpenFile: %v", err) + } + for j := 0; j < 100; j++ { + f.Write(xxx) + } + if err := f.Close(); err != nil { + b.Fatalf("Close: %v", err) + } + if err := fs.RemoveAll(ctx, "/xxx"); err != nil { + b.Fatalf("RemoveAll: %v", err) + } + } +} + +func TestCopyMoveProps(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + create := func(name string) error { + f, err := fs.OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + _, wErr := f.Write([]byte("contents")) + cErr := f.Close() + if wErr != nil { + return wErr + } + return cErr + } + patch := func(name string, patches ...Proppatch) error { + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) + if err != nil { + return err + } + _, pErr := f.(DeadPropsHolder).Patch(patches) + cErr := f.Close() + if pErr != nil { + return pErr + } + return cErr + } + props := func(name string) (map[xml.Name]Property, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) + if err != nil { + return nil, err + } + m, pErr := f.(DeadPropsHolder).DeadProps() + cErr := f.Close() + if pErr != nil { + return nil, pErr + } + if cErr != nil { + return nil, cErr + } + return m, nil + } + + p0 := Property{ + XMLName: xml.Name{Space: "x:", Local: "boat"}, + InnerXML: []byte("pea-green"), + } + p1 := Property{ + XMLName: xml.Name{Space: "x:", Local: "ring"}, + InnerXML: []byte("1 shilling"), + } + p2 := Property{ + XMLName: xml.Name{Space: "x:", Local: "spoon"}, + InnerXML: []byte("runcible"), + } + p3 := Property{ + XMLName: xml.Name{Space: "x:", Local: "moon"}, + InnerXML: []byte("light"), + } + + if err := create("/src"); err != nil { + t.Fatalf("create /src: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil { + t.Fatalf("patch /src +p0 +p1: %v", 
err) + } + if _, err := copyFiles(ctx, fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil { + t.Fatalf("copyFiles /src /tmp: %v", err) + } + if _, err := moveFiles(ctx, fs, "/tmp", "/dst", true); err != nil { + t.Fatalf("moveFiles /tmp /dst: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil { + t.Fatalf("patch /src -p0: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p2}}); err != nil { + t.Fatalf("patch /src +p2: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p1}, Remove: true}); err != nil { + t.Fatalf("patch /dst -p1: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p3}}); err != nil { + t.Fatalf("patch /dst +p3: %v", err) + } + + gotSrc, err := props("/src") + if err != nil { + t.Fatalf("props /src: %v", err) + } + wantSrc := map[xml.Name]Property{ + p1.XMLName: p1, + p2.XMLName: p2, + } + if !reflect.DeepEqual(gotSrc, wantSrc) { + t.Fatalf("props /src:\ngot %v\nwant %v", gotSrc, wantSrc) + } + + gotDst, err := props("/dst") + if err != nil { + t.Fatalf("props /dst: %v", err) + } + wantDst := map[xml.Name]Property{ + p0.XMLName: p0, + p3.XMLName: p3, + } + if !reflect.DeepEqual(gotDst, wantDst) { + t.Fatalf("props /dst:\ngot %v\nwant %v", gotDst, wantDst) + } +} + +func TestWalkFS(t *testing.T) { + testCases := []struct { + desc string + buildfs []string + startAt string + depth int + walkFn filepath.WalkFunc + want []string + }{{ + "just root", + []string{}, + "/", + infiniteDepth, + nil, + []string{ + "/", + }, + }, { + "infinite walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + infiniteDepth, + nil, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/d", + "/e", + "/f", + }, + }, { + "infinite walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/a", + infiniteDepth, + nil, + []string{ + "/a", + "/a/b", + "/a/b/c", + "/a/d", + }, + }, { + "depth 1 walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + 1, + nil, + []string{ + "/", + "/a", + "/e", + "/f", + }, + }, { + "depth 1 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 1, + nil, + []string{ + "/a/b", + "/a/b/c", + "/a/b/g", + }, + }, { + "depth 0 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk from file", + []string{ + "mkdir /a", + "touch /a/b", + "touch /a/c", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk with skipped subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + "touch /a/b/z", + }, + "/", + infiniteDepth, + func(path string, info os.FileInfo, err error) error { + if path == "/a/b/g" { + return filepath.SkipDir + } + return nil + }, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/b/z", + }, + }} + ctx := context.Background() + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + var got []string + traceFn := 
func(path string, info os.FileInfo, err error) error { + if tc.walkFn != nil { + err = tc.walkFn(path, info, err) + if err != nil { + return err + } + } + got = append(got, path) + return nil + } + fi, err := fs.Stat(ctx, tc.startAt) + if err != nil { + t.Fatalf("%s: cannot stat: %v", tc.desc, err) + } + err = walkFS(ctx, fs, tc.depth, tc.startAt, fi, traceFn) + if err != nil { + t.Errorf("%s:\ngot error %v, want nil", tc.desc, err) + continue + } + sort.Strings(got) + sort.Strings(tc.want) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %q\nwant %q", tc.desc, got, tc.want) + continue + } + } +} + +func buildTestFS(buildfs []string) (FileSystem, error) { + // TODO: Could this be merged with the build logic in TestFS? + + ctx := context.Background() + fs := NewMemFS() + for _, b := range buildfs { + op := strings.Split(b, " ") + switch op[0] { + case "mkdir": + err := fs.Mkdir(ctx, op[1], os.ModeDir|0777) + if err != nil { + return nil, err + } + case "touch": + f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + f.Close() + case "write": + f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return nil, err + } + _, err = f.Write([]byte(op[2])) + f.Close() + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown file operation %q", op[0]) + } + } + return fs, nil +} diff --git a/vendor/golang.org/x/net/webdav/if.go b/vendor/golang.org/x/net/webdav/if.go new file mode 100644 index 000000000..416e81cdf --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +// The If header is covered by Section 10.4. +// http://www.webdav.org/specs/rfc4918.html#HEADER_If + +import ( + "strings" +) + +// ifHeader is a disjunction (OR) of ifLists. +type ifHeader struct { + lists []ifList +} + +// ifList is a conjunction (AND) of Conditions, and an optional resource tag. +type ifList struct { + resourceTag string + conditions []Condition +} + +// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string +// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is +// returned by req.Header.Get("If") for a http.Request req. 
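// Editorial aside, not part of the vendored file: a worked example of what the
// parser defined below produces for a tagged-list header in the style of RFC
// 4918 section 7.5.2 (the URL and lock token are illustrative). parseIfHeader
// is unexported, so this snippet would have to live inside package webdav,
// for example in a test.
func exampleParseIfHeader() {
	h, ok := parseIfHeader(`<http://example.com/locked/> (<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`)
	if !ok {
		panic("If header did not parse")
	}
	// One tagged list: the lexer strips the angle brackets, so the resource tag
	// keeps just the URL and the single condition carries the lock token.
	_ = h.lists[0].resourceTag         // "http://example.com/locked/"
	_ = h.lists[0].conditions[0].Token // "urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf"
}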
+func parseIfHeader(httpHeader string) (h ifHeader, ok bool) { + s := strings.TrimSpace(httpHeader) + switch tokenType, _, _ := lex(s); tokenType { + case '(': + return parseNoTagLists(s) + case angleTokenType: + return parseTaggedLists(s) + default: + return ifHeader{}, false + } +} + +func parseNoTagLists(s string) (h ifHeader, ok bool) { + for { + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + } +} + +func parseTaggedLists(s string) (h ifHeader, ok bool) { + resourceTag, n := "", 0 + for first := true; ; first = false { + tokenType, tokenStr, remaining := lex(s) + switch tokenType { + case angleTokenType: + if !first && n == 0 { + return ifHeader{}, false + } + resourceTag, n = tokenStr, 0 + s = remaining + case '(': + n++ + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + l.resourceTag = resourceTag + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + default: + return ifHeader{}, false + } + } +} + +func parseList(s string) (l ifList, remaining string, ok bool) { + tokenType, _, s := lex(s) + if tokenType != '(' { + return ifList{}, "", false + } + for { + tokenType, _, remaining = lex(s) + if tokenType == ')' { + if len(l.conditions) == 0 { + return ifList{}, "", false + } + return l, remaining, true + } + c, remaining, ok := parseCondition(s) + if !ok { + return ifList{}, "", false + } + l.conditions = append(l.conditions, c) + s = remaining + } +} + +func parseCondition(s string) (c Condition, remaining string, ok bool) { + tokenType, tokenStr, s := lex(s) + if tokenType == notTokenType { + c.Not = true + tokenType, tokenStr, s = lex(s) + } + switch tokenType { + case strTokenType, angleTokenType: + c.Token = tokenStr + case squareTokenType: + c.ETag = tokenStr + default: + return Condition{}, "", false + } + return c, s, true +} + +// Single-rune tokens like '(' or ')' have a token type equal to their rune. +// All other tokens have a negative token type. +const ( + errTokenType = rune(-1) + eofTokenType = rune(-2) + strTokenType = rune(-3) + notTokenType = rune(-4) + angleTokenType = rune(-5) + squareTokenType = rune(-6) +) + +func lex(s string) (tokenType rune, tokenStr string, remaining string) { + // The net/textproto Reader that parses the HTTP header will collapse + // Linear White Space that spans multiple "\r\n" lines to a single " ", + // so we don't need to look for '\r' or '\n'. + for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') { + s = s[1:] + } + if len(s) == 0 { + return eofTokenType, "", "" + } + i := 0 +loop: + for ; i < len(s); i++ { + switch s[i] { + case '\t', ' ', '(', ')', '<', '>', '[', ']': + break loop + } + } + + if i != 0 { + tokenStr, remaining = s[:i], s[i:] + if tokenStr == "Not" { + return notTokenType, "", remaining + } + return strTokenType, tokenStr, remaining + } + + j := 0 + switch s[0] { + case '<': + j, tokenType = strings.IndexByte(s, '>'), angleTokenType + case '[': + j, tokenType = strings.IndexByte(s, ']'), squareTokenType + default: + return rune(s[0]), "", s[1:] + } + if j < 0 { + return errTokenType, "", "" + } + return tokenType, s[1:j], s[j+1:] +} diff --git a/vendor/golang.org/x/net/webdav/if_test.go b/vendor/golang.org/x/net/webdav/if_test.go new file mode 100644 index 000000000..aad61a401 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if_test.go @@ -0,0 +1,322 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "reflect" + "strings" + "testing" +) + +func TestParseIfHeader(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + want ifHeader + }{{ + "bad: empty", + ``, + ifHeader{}, + }, { + "bad: no parens", + `foobar`, + ifHeader{}, + }, { + "bad: empty list #1", + `()`, + ifHeader{}, + }, { + "bad: empty list #2", + `(a) (b c) () (d)`, + ifHeader{}, + }, { + "bad: no list after resource #1", + ``, + ifHeader{}, + }, { + "bad: no list after resource #2", + ` (a)`, + ifHeader{}, + }, { + "bad: no list after resource #3", + ` (a) (b) `, + ifHeader{}, + }, { + "bad: no-tag-list followed by tagged-list", + `(a) (b) (c)`, + ifHeader{}, + }, { + "bad: unfinished list", + `(a`, + ifHeader{}, + }, { + "bad: unfinished ETag", + `([b`, + ifHeader{}, + }, { + "bad: unfinished Notted list", + `(Not a`, + ifHeader{}, + }, { + "bad: double Not", + `(Not Not a)`, + ifHeader{}, + }, { + "good: one list with a Token", + `(a)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }}, + }, + }, { + "good: one list with an ETag", + `([a])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + ETag: `a`, + }}, + }}, + }, + }, { + "good: one list with three Nots", + `(Not a Not b Not [d])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }, { + Not: true, + Token: `b`, + }, { + Not: true, + ETag: `d`, + }}, + }}, + }, + }, { + "good: two lists", + `(a) (b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Token: `b`, + }}, + }}, + }, + }, { + "good: two Notted lists", + `(Not a) (Not b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `b`, + }}, + }}, + }, + }, { + "section 7.5.1", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/users/f/fielding/index.html`, + conditions: []Condition{{ + Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`, + }}, + }}, + }, + }, { + "section 7.5.2 #1", + `()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #2", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #3", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/member`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 9.9.6", + `() + ()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`, + }}, + }, { + conditions: []Condition{{ + Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`, + }}, + }}, + }, + }, { + "section 9.10.8", + `()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`, + }}, + }}, + }, + }, { + "section 10.4.6", + `( + ["I am an ETag"]) + (["I am another ETag"])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: 
`urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `"I am an ETag"`, + }}, + }, { + conditions: []Condition{{ + ETag: `"I am another ETag"`, + }}, + }}, + }, + }, { + "section 10.4.7", + `(Not + )`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`, + }}, + }}, + }, + }, { + "section 10.4.8", + `() + (Not )`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `DAV:no-lock`, + }}, + }}, + }, + }, { + "section 10.4.9", + ` + ( + [W/"A weak ETag"]) (["strong ETag"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/resource1`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `W/"A weak ETag"`, + }}, + }, { + resourceTag: `/resource1`, + conditions: []Condition{{ + ETag: `"strong ETag"`, + }}, + }}, + }, + }, { + "section 10.4.10", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/specs/`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }}, + }, + }, { + "section 10.4.11 #1", + ` (["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + ETag: `"4217"`, + }}, + }}, + }, + }, { + "section 10.4.11 #2", + ` (Not ["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + Not: true, + ETag: `"4217"`, + }}, + }}, + }, + }} + + for _, tc := range testCases { + got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1)) + if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok { + t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok) + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %v\nwant %v", tc.desc, got, tc.want) + continue + } + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/README b/vendor/golang.org/x/net/webdav/internal/xml/README new file mode 100644 index 000000000..89656f489 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/README @@ -0,0 +1,11 @@ +This is a fork of the encoding/xml package at ca1d6c4, the last commit before +https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name +space behavior" made late in the lead-up to the Go 1.5 release. + +The list of encoding/xml changes is at +https://go.googlesource.com/go/+log/master/src/encoding/xml + +This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is +released. + +See http://golang.org/issue/11841 diff --git a/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go new file mode 100644 index 000000000..a71284312 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xml + +import "time" + +var atomValue = &Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Example Feed", + Link: []Link{{Href: "http://example.org/"}}, + Updated: ParseTime("2003-12-13T18:30:02Z"), + Author: Person{Name: "John Doe"}, + Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6", + + Entry: []Entry{ + { + Title: "Atom-Powered Robots Run Amok", + Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}}, + Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a", + Updated: ParseTime("2003-12-13T18:30:02Z"), + Summary: NewText("Some text."), + }, + }, +} + +var atomXml = `` + + `` + + `Example Feed` + + `urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6` + + `` + + `John Doe` + + `` + + `Atom-Powered Robots Run Amok` + + `urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a` + + `` + + `2003-12-13T18:30:02Z` + + `` + + `Some text.` + + `` + + `` + +func ParseTime(str string) time.Time { + t, err := time.Parse(time.RFC3339, str) + if err != nil { + panic(err) + } + return t +} + +func NewText(text string) Text { + return Text{ + Body: text, + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/example_test.go b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go new file mode 100644 index 000000000..21b48dea5 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go @@ -0,0 +1,151 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml_test + +import ( + "encoding/xml" + "fmt" + "os" +) + +func ExampleMarshalIndent() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + output, err := xml.MarshalIndent(v, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + os.Stdout.Write(output) + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +func ExampleEncoder() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + enc := xml.NewEncoder(os.Stdout) + enc.Indent(" ", " ") + if err := enc.Encode(v); err != nil { + fmt.Printf("error: %v\n", err) + } + + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +// This example demonstrates unmarshaling an XML excerpt into a value with +// some preset fields. Note that the Phone field isn't modified and that +// the XML element is ignored. Also, the Groups field is assigned +// considering the element path provided in its tag. 
+func ExampleUnmarshal() { + type Email struct { + Where string `xml:"where,attr"` + Addr string + } + type Address struct { + City, State string + } + type Result struct { + XMLName xml.Name `xml:"Person"` + Name string `xml:"FullName"` + Phone string + Email []Email + Groups []string `xml:"Group>Value"` + Address + } + v := Result{Name: "none", Phone: "none"} + + data := ` + + Grace R. Emlin + Example Inc. + + gre@example.com + + + gre@work.com + + + Friends + Squash + + Hanga Roa + Easter Island + + ` + err := xml.Unmarshal([]byte(data), &v) + if err != nil { + fmt.Printf("error: %v", err) + return + } + fmt.Printf("XMLName: %#v\n", v.XMLName) + fmt.Printf("Name: %q\n", v.Name) + fmt.Printf("Phone: %q\n", v.Phone) + fmt.Printf("Email: %v\n", v.Email) + fmt.Printf("Groups: %v\n", v.Groups) + fmt.Printf("Address: %v\n", v.Address) + // Output: + // XMLName: xml.Name{Space:"", Local:"Person"} + // Name: "Grace R. Emlin" + // Phone: "none" + // Email: [{home gre@example.com} {work gre@work.com}] + // Groups: [Friends Squash] + // Address: {Hanga Roa Easter Island} +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go new file mode 100644 index 000000000..cb82ec214 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go @@ -0,0 +1,1223 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bufio" + "bytes" + "encoding" + "fmt" + "io" + "reflect" + "strconv" + "strings" +) + +const ( + // A generic XML header suitable for use with the output of Marshal. + // This is not automatically added to any output of this package, + // it is provided as a convenience. + Header = `` + "\n" +) + +// Marshal returns the XML encoding of v. +// +// Marshal handles an array or slice by marshalling each of the elements. +// Marshal handles a pointer by marshalling the value it points at or, if the +// pointer is nil, by writing nothing. Marshal handles an interface value by +// marshalling the value it contains or, if the interface value is nil, by +// writing nothing. Marshal handles all other data by writing one or more XML +// elements containing the data. +// +// The name for the XML elements is taken from, in order of preference: +// - the tag on the XMLName field, if the data is a struct +// - the value of the XMLName field of type xml.Name +// - the tag of the struct field used to obtain the data +// - the name of the struct field used to obtain the data +// - the name of the marshalled type +// +// The XML element for a struct contains marshalled elements for each of the +// exported fields of the struct, with these exceptions: +// - the XMLName field, described above, is omitted. +// - a field with tag "-" is omitted. +// - a field with tag "name,attr" becomes an attribute with +// the given name in the XML element. +// - a field with tag ",attr" becomes an attribute with the +// field name in the XML element. +// - a field with tag ",chardata" is written as character data, +// not as an XML element. +// - a field with tag ",innerxml" is written verbatim, not subject +// to the usual marshalling procedure. +// - a field with tag ",comment" is written as an XML comment, not +// subject to the usual marshalling procedure. It must not contain +// the "--" string within it. +// - a field with a tag including the "omitempty" option is omitted +// if the field value is empty. 
The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or +// string of length zero. +// - an anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// If a field uses a tag "a>b>c", then the element c will be nested inside +// parent elements a and b. Fields that appear next to each other that name +// the same parent will be enclosed in one XML element. +// +// See MarshalIndent for an example. +// +// Marshal will return an error if asked to marshal a channel, function, or map. +func Marshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + if err := NewEncoder(&b).Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves into valid XML elements. +// +// MarshalXML encodes the receiver as zero or more XML elements. +// By convention, arrays or slices are typically encoded as a sequence +// of elements, one per entry. +// Using start as the element tag is not required, but doing so +// will enable Unmarshal to match the XML elements to the correct +// struct field. +// One common implementation strategy is to construct a separate +// value with a layout corresponding to the desired XML and then +// to encode it using e.EncodeElement. +// Another common strategy is to use repeated calls to e.EncodeToken +// to generate the XML output one token at a time. +// The sequence of encoded tokens must make up zero or more valid +// XML elements. +type Marshaler interface { + MarshalXML(e *Encoder, start StartElement) error +} + +// MarshalerAttr is the interface implemented by objects that can marshal +// themselves into valid XML attributes. +// +// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. +// Using name as the attribute name is not required, but doing so +// will enable Unmarshal to match the attribute to the correct +// struct field. +// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute +// will be generated in the output. +// MarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type MarshalerAttr interface { + MarshalXMLAttr(name Name) (Attr, error) +} + +// MarshalIndent works like Marshal, but each XML element begins on a new +// indented line that starts with prefix and is followed by one or more +// copies of indent according to the nesting depth. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + var b bytes.Buffer + enc := NewEncoder(&b) + enc.Indent(prefix, indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// An Encoder writes XML data to an output stream. +type Encoder struct { + p printer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{printer{Writer: bufio.NewWriter(w)}} + e.p.encoder = e + return e +} + +// Indent sets the encoder to generate XML in which each element +// begins on a new indented line that starts with prefix and is followed by +// one or more copies of indent according to the nesting depth. +func (enc *Encoder) Indent(prefix, indent string) { + enc.p.prefix = prefix + enc.p.indent = indent +} + +// Encode writes the XML encoding of v to the stream. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// Encode calls Flush before returning. 
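As a quick, hedged illustration of the struct-tag rules listed in the Marshal documentation above, here is a sketch using the standard encoding/xml package (this vendored file is a fork of it, so the basics behave the same); the Listener type and its values are invented for the example.

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Listener exercises the ",attr", ",chardata" and ",comment" tag options.
type Listener struct {
	XMLName xml.Name `xml:"port"`
	Type    string   `xml:"type,attr,omitempty"` // attribute, omitted when empty
	Number  string   `xml:",chardata"`           // element text rather than a child element
	Comment string   `xml:",comment"`            // emitted as an XML comment
}

func main() {
	out, err := xml.Marshal(Listener{Type: "ssl", Number: "443", Comment: "https"})
	if err != nil {
		panic(err)
	}
	// Should print: <port type="ssl">443<!--https--></port>
	fmt.Println(string(out))
}
```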
+func (enc *Encoder) Encode(v interface{}) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil) + if err != nil { + return err + } + return enc.p.Flush() +} + +// EncodeElement writes the XML encoding of v to the stream, +// using start as the outermost tag in the encoding. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// EncodeElement calls Flush before returning. +func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start) + if err != nil { + return err + } + return enc.p.Flush() +} + +var ( + begComment = []byte("<!--") + endComment = []byte("-->") + endProcInst = []byte("?>") + endDirective = []byte(">") +) + +// EncodeToken writes the given XML token to the stream. +// It returns an error if StartElement and EndElement tokens are not +// properly matched. +// +// EncodeToken does not call Flush, because usually it is part of a +// larger operation such as Encode or EncodeElement (or a custom +// Marshaler's MarshalXML invoked during those), and those will call +// Flush when finished. Callers that create an Encoder and then invoke +// EncodeToken directly, without using Encode or EncodeElement, need to +// call Flush when finished to ensure that the XML is written to the +// underlying writer. +// +// EncodeToken allows writing a ProcInst with Target set to "xml" only +// as the first token in the stream. +// +// When encoding a StartElement holding an XML namespace prefix +// declaration for a prefix that is not already declared, contained +// elements (including the StartElement itself) will use the declared +// prefix when encoding names with matching namespace URIs. +func (enc *Encoder) EncodeToken(t Token) error { + + p := &enc.p + switch t := t.(type) { + case StartElement: + if err := p.writeStart(&t); err != nil { + return err + } + case EndElement: + if err := p.writeEnd(t.Name); err != nil { + return err + } + case CharData: + escapeText(p, t, false) + case Comment: + if bytes.Contains(t, endComment) { + return fmt.Errorf("xml: EncodeToken of Comment containing --> marker") + } + p.WriteString("<!--") + p.Write(t) + p.WriteString("-->") + return p.cachedWriteError() + case ProcInst: + // First token to be encoded which is also a ProcInst with target of xml + // is the xml declaration. The only ProcInst where target of xml is allowed. + if t.Target == "xml" && p.Buffered() != 0 { + return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") + } + if !isNameString(t.Target) { + return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target") + } + if bytes.Contains(t.Inst, endProcInst) { + return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker") + } + p.WriteString("<?") + p.WriteString(t.Target) + if len(t.Inst) > 0 { + p.WriteByte(' ') + p.Write(t.Inst) + } + p.WriteString("?>") + case Directive: + if !isValidDirective(t) { + return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers") + } + p.WriteString("<!") + p.Write(t) + p.WriteString(">") + default: + return fmt.Errorf("xml: EncodeToken of invalid token type") + + } + return p.cachedWriteError() +} + +// isValidDirective reports whether dir is a valid directive text, +// meaning angle brackets are matched, ignoring comments and strings.
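A small usage sketch of the token-level API documented above, written against the standard encoding/xml Encoder; the forked EncodeToken here follows the same rules, in particular that the "xml" ProcInst must come first and that Flush must be called explicitly. The element and comment contents are invented.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"os"
)

func main() {
	enc := xml.NewEncoder(os.Stdout)

	// The "xml" ProcInst is only valid as the very first token.
	tokens := []xml.Token{
		xml.ProcInst{Target: "xml", Inst: []byte(`version="1.0"`)},
		xml.StartElement{Name: xml.Name{Local: "greeting"}},
		xml.Comment("generated token by token"),
		xml.CharData("hello"),
		xml.EndElement{Name: xml.Name{Local: "greeting"}},
	}
	for _, tok := range tokens {
		if err := enc.EncodeToken(tok); err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
	}
	// EncodeToken does not flush on its own.
	if err := enc.Flush(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Println()
}
```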
+func isValidDirective(dir Directive) bool { + var ( + depth int + inquote uint8 + incomment bool + ) + for i, c := range dir { + switch { + case incomment: + if c == '>' { + if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) { + incomment = false + } + } + // Just ignore anything in comment + case inquote != 0: + if c == inquote { + inquote = 0 + } + // Just ignore anything within quotes + case c == '\'' || c == '"': + inquote = c + case c == '<': + if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) { + incomment = true + } else { + depth++ + } + case c == '>': + if depth == 0 { + return false + } + depth-- + } + } + return depth == 0 && inquote == 0 && !incomment +} + +// Flush flushes any buffered XML to the underlying writer. +// See the EncodeToken documentation for details about when it is necessary. +func (enc *Encoder) Flush() error { + return enc.p.Flush() +} + +type printer struct { + *bufio.Writer + encoder *Encoder + seq int + indent string + prefix string + depth int + indentedIn bool + putNewline bool + defaultNS string + attrNS map[string]string // map prefix -> name space + attrPrefix map[string]string // map name space -> prefix + prefixes []printerPrefix + tags []Name +} + +// printerPrefix holds a namespace undo record. +// When an element is popped, the prefix record +// is set back to the recorded URL. The empty +// prefix records the URL for the default name space. +// +// The start of an element is recorded with an element +// that has mark=true. +type printerPrefix struct { + prefix string + url string + mark bool +} + +func (p *printer) prefixForNS(url string, isAttr bool) string { + // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml" + // and must be referred to that way. + // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns", + // but users should not be trying to use that one directly - that's our job.) + if url == xmlURL { + return "xml" + } + if !isAttr && url == p.defaultNS { + // We can use the default name space. + return "" + } + return p.attrPrefix[url] +} + +// defineNS pushes any namespace definition found in the given attribute. +// If ignoreNonEmptyDefault is true, an xmlns="nonempty" +// attribute will be ignored. +func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error { + var prefix string + if attr.Name.Local == "xmlns" { + if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL { + return fmt.Errorf("xml: cannot redefine xmlns attribute prefix") + } + } else if attr.Name.Space == "xmlns" && attr.Name.Local != "" { + prefix = attr.Name.Local + if attr.Value == "" { + // Technically, an empty XML namespace is allowed for an attribute. + // From http://www.w3.org/TR/xml-names11/#scoping-defaulting: + // + // The attribute value in a namespace declaration for a prefix may be + // empty. This has the effect, within the scope of the declaration, of removing + // any association of the prefix with a namespace name. + // + // However our namespace prefixes here are used only as hints. There's + // no need to respect the removal of a namespace prefix, so we ignore it. + return nil + } + } else { + // Ignore: it's not a namespace definition + return nil + } + if prefix == "" { + if attr.Value == p.defaultNS { + // No need for redefinition. + return nil + } + if attr.Value != "" && ignoreNonEmptyDefault { + // We have an xmlns="..." 
value but + // it can't define a name space in this context, + // probably because the element has an empty + // name space. In this case, we just ignore + // the name space declaration. + return nil + } + } else if _, ok := p.attrPrefix[attr.Value]; ok { + // There's already a prefix for the given name space, + // so use that. This prevents us from + // having two prefixes for the same name space + // so attrNS and attrPrefix can remain bijective. + return nil + } + p.pushPrefix(prefix, attr.Value) + return nil +} + +// createNSPrefix creates a name space prefix attribute +// to use for the given name space, defining a new prefix +// if necessary. +// If isAttr is true, the prefix is to be created for an attribute +// prefix, which means that the default name space cannot +// be used. +func (p *printer) createNSPrefix(url string, isAttr bool) { + if _, ok := p.attrPrefix[url]; ok { + // We already have a prefix for the given URL. + return + } + switch { + case !isAttr && url == p.defaultNS: + // We can use the default name space. + return + case url == "": + // The only way we can encode names in the empty + // name space is by using the default name space, + // so we must use that. + if p.defaultNS != "" { + // The default namespace is non-empty, so we + // need to set it to empty. + p.pushPrefix("", "") + } + return + case url == xmlURL: + return + } + // TODO If the URL is an existing prefix, we could + // use it as is. That would enable the + // marshaling of elements that had been unmarshaled + // and with a name space prefix that was not found. + // although technically it would be incorrect. + + // Pick a name. We try to use the final element of the path + // but fall back to _. + prefix := strings.TrimRight(url, "/") + if i := strings.LastIndex(prefix, "/"); i >= 0 { + prefix = prefix[i+1:] + } + if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { + prefix = "_" + } + if strings.HasPrefix(prefix, "xml") { + // xmlanything is reserved. + prefix = "_" + prefix + } + if p.attrNS[prefix] != "" { + // Name is taken. Find a better one. + for p.seq++; ; p.seq++ { + if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { + prefix = id + break + } + } + } + + p.pushPrefix(prefix, url) +} + +// writeNamespaces writes xmlns attributes for all the +// namespace prefixes that have been defined in +// the current element. +func (p *printer) writeNamespaces() { + for i := len(p.prefixes) - 1; i >= 0; i-- { + prefix := p.prefixes[i] + if prefix.mark { + return + } + p.WriteString(" ") + if prefix.prefix == "" { + // Default name space. + p.WriteString(`xmlns="`) + } else { + p.WriteString("xmlns:") + p.WriteString(prefix.prefix) + p.WriteString(`="`) + } + EscapeText(p, []byte(p.nsForPrefix(prefix.prefix))) + p.WriteString(`"`) + } +} + +// pushPrefix pushes a new prefix on the prefix stack +// without checking to see if it is already defined. +func (p *printer) pushPrefix(prefix, url string) { + p.prefixes = append(p.prefixes, printerPrefix{ + prefix: prefix, + url: p.nsForPrefix(prefix), + }) + p.setAttrPrefix(prefix, url) +} + +// nsForPrefix returns the name space for the given +// prefix. Note that this is not valid for the +// empty attribute prefix, which always has an empty +// name space. +func (p *printer) nsForPrefix(prefix string) string { + if prefix == "" { + return p.defaultNS + } + return p.attrNS[prefix] +} + +// markPrefix marks the start of an element on the prefix +// stack. 
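For a sense of what the prefix bookkeeping above produces, here is a hedged sketch using the standard encoding/xml package; the fork and the current standard library differ in exactly where xmlns declarations are repeated, which is part of why this fork exists (see the README earlier in this patch). The Prop type and the "DAV:" name space usage are illustrative only.

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// A tag of the form `xml:"DAV: prop"` places the element in the "DAV:" name
// space; the encoder then emits whatever xmlns declarations it needs.
type Prop struct {
	XMLName     xml.Name `xml:"DAV: prop"`
	DisplayName string   `xml:"DAV: displayname"`
}

func main() {
	out, _ := xml.Marshal(Prop{DisplayName: "example"})
	// Prints the element inside the DAV: name space, along the lines of
	// <prop xmlns="DAV:"><displayname>example</displayname></prop>;
	// whether xmlns="DAV:" is repeated on <displayname> depends on the
	// encoder in use (fork vs. current standard library).
	fmt.Println(string(out))
}
```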
+func (p *printer) markPrefix() { + p.prefixes = append(p.prefixes, printerPrefix{ + mark: true, + }) +} + +// popPrefix pops all defined prefixes for the current +// element. +func (p *printer) popPrefix() { + for len(p.prefixes) > 0 { + prefix := p.prefixes[len(p.prefixes)-1] + p.prefixes = p.prefixes[:len(p.prefixes)-1] + if prefix.mark { + break + } + p.setAttrPrefix(prefix.prefix, prefix.url) + } +} + +// setAttrPrefix sets an attribute name space prefix. +// If url is empty, the attribute is removed. +// If prefix is empty, the default name space is set. +func (p *printer) setAttrPrefix(prefix, url string) { + if prefix == "" { + p.defaultNS = url + return + } + if url == "" { + delete(p.attrPrefix, p.attrNS[prefix]) + delete(p.attrNS, prefix) + return + } + if p.attrPrefix == nil { + // Need to define a new name space. + p.attrPrefix = make(map[string]string) + p.attrNS = make(map[string]string) + } + // Remove any old prefix value. This is OK because we maintain a + // strict one-to-one mapping between prefix and URL (see + // defineNS) + delete(p.attrPrefix, p.attrNS[prefix]) + p.attrPrefix[url] = prefix + p.attrNS[prefix] = url +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +) + +// marshalValue writes one or more XML elements representing val. +// If val was obtained from a struct field, finfo must have its details. +func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { + if startTemplate != nil && startTemplate.Name.Local == "" { + return fmt.Errorf("xml: EncodeElement of StartElement with missing name") + } + + if !val.IsValid() { + return nil + } + if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { + return nil + } + + // Drill into interfaces and pointers. + // This can turn into an infinite loop given a cyclic chain, + // but it matches the Go 1 behavior. + for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + if val.IsNil() { + return nil + } + val = val.Elem() + } + + kind := val.Kind() + typ := val.Type() + + // Check for marshaler. + if val.CanInterface() && typ.Implements(marshalerType) { + return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerType) { + return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Check for text marshaler. + if val.CanInterface() && typ.Implements(textMarshalerType) { + return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Slices and arrays iterate over the elements. They do not have an enclosing tag. + if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { + for i, n := 0, val.Len(); i < n; i++ { + if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { + return err + } + } + return nil + } + + tinfo, err := getTypeInfo(typ) + if err != nil { + return err + } + + // Create start element. 
+ // Precedence for the XML element name is: + // 0. startTemplate + // 1. XMLName field in underlying struct; + // 2. field name/tag in the struct field; and + // 3. type name + var start StartElement + + // explicitNS records whether the element's name space has been + // explicitly set (for example an XMLName field). + explicitNS := false + + if startTemplate != nil { + start.Name = startTemplate.Name + explicitNS = true + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if tinfo.xmlname != nil { + xmlname := tinfo.xmlname + if xmlname.name != "" { + start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name + } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { + start.Name = v + } + explicitNS = true + } + if start.Name.Local == "" && finfo != nil { + start.Name.Local = finfo.name + if finfo.xmlns != "" { + start.Name.Space = finfo.xmlns + explicitNS = true + } + } + if start.Name.Local == "" { + name := typ.Name() + if name == "" { + return &UnsupportedTypeError{typ} + } + start.Name.Local = name + } + + // defaultNS records the default name space as set by a xmlns="..." + // attribute. We don't set p.defaultNS because we want to let + // the attribute writing code (in p.defineNS) be solely responsible + // for maintaining that. + defaultNS := p.defaultNS + + // Attributes + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr == 0 { + continue + } + attr, err := p.fieldAttr(finfo, val) + if err != nil { + return err + } + if attr.Name.Local == "" { + continue + } + start.Attr = append(start.Attr, attr) + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + defaultNS = attr.Value + } + } + if !explicitNS { + // Historic behavior: elements use the default name space + // they are contained in by default. + start.Name.Space = defaultNS + } + // Historic behaviour: an element that's in a namespace sets + // the default namespace for all elements contained within it. + start.setDefaultNamespace() + + if err := p.writeStart(&start); err != nil { + return err + } + + if val.Kind() == reflect.Struct { + err = p.marshalStruct(tinfo, val) + } else { + s, b, err1 := p.marshalSimple(typ, val) + if err1 != nil { + err = err1 + } else if b != nil { + EscapeText(p, b) + } else { + p.EscapeString(s) + } + } + if err != nil { + return err + } + + if err := p.writeEnd(start.Name); err != nil { + return err + } + + return p.cachedWriteError() +} + +// fieldAttr returns the attribute of the given field. +// If the returned attribute has an empty Name.Local, +// it should not be used. +// The given value holds the value containing the field. 
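The element-name precedence implemented above (start template, then an XMLName field, then the field tag or name, then the type name) can be seen with the standard encoding/xml package; the message/envelope/body names below are made up for the example.

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// The XMLName field renames the element away from the Go type name, and the
// field tag renames the child away from the Go field name.
type message struct {
	XMLName xml.Name `xml:"envelope"`
	Payload string   `xml:"body"`
}

func main() {
	out, _ := xml.Marshal(message{Payload: "hi"})
	fmt.Println(string(out)) // <envelope><body>hi</body></envelope>
}
```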
+func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) { + fv := finfo.value(val) + name := Name{Space: finfo.xmlns, Local: finfo.name} + if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { + return Attr{}, nil + } + if fv.Kind() == reflect.Interface && fv.IsNil() { + return Attr{}, nil + } + if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { + attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + } + if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { + text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + } + // Dereference or skip nil pointer, interface values. + switch fv.Kind() { + case reflect.Ptr, reflect.Interface: + if fv.IsNil() { + return Attr{}, nil + } + fv = fv.Elem() + } + s, b, err := p.marshalSimple(fv.Type(), fv) + if err != nil { + return Attr{}, err + } + if b != nil { + s = string(b) + } + return Attr{name, s}, nil +} + +// defaultStart returns the default start element to use, +// given the reflect type, field info, and start template. +func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { + var start StartElement + // Precedence for the XML element name is as above, + // except that we do not look inside structs for the first field. + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if finfo != nil && finfo.name != "" { + start.Name.Local = finfo.name + start.Name.Space = finfo.xmlns + } else if typ.Name() != "" { + start.Name.Local = typ.Name() + } else { + // Must be a pointer to a named type, + // since it has the Marshaler methods. + start.Name.Local = typ.Elem().Name() + } + // Historic behaviour: elements use the name space of + // the element they are contained in by default. + if start.Name.Space == "" { + start.Name.Space = p.defaultNS + } + start.setDefaultNamespace() + return start +} + +// marshalInterface marshals a Marshaler interface value. +func (p *printer) marshalInterface(val Marshaler, start StartElement) error { + // Push a marker onto the tag stack so that MarshalXML + // cannot close the XML tags that it did not open. + p.tags = append(p.tags, Name{}) + n := len(p.tags) + + err := val.MarshalXML(p.encoder, start) + if err != nil { + return err + } + + // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. + if len(p.tags) > n { + return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) + } + p.tags = p.tags[:n-1] + return nil +} + +// marshalTextInterface marshals a TextMarshaler interface value. 
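To illustrate the dispatch order above (MarshalerAttr, then encoding.TextMarshaler, then the plain kinds), a sketch with the standard encoding/xml package: time.Time already implements encoding.TextMarshaler, so it is rendered via MarshalText both as an attribute and as element text. The event type is invented.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

type event struct {
	XMLName xml.Name  `xml:"event"`
	At      time.Time `xml:"at,attr"` // rendered via MarshalText (RFC 3339)
	Ends    time.Time `xml:"ends"`    // likewise, as element text
}

func main() {
	ts := time.Date(2016, 1, 2, 15, 4, 5, 0, time.UTC)
	out, _ := xml.Marshal(event{At: ts, Ends: ts})
	// <event at="2016-01-02T15:04:05Z"><ends>2016-01-02T15:04:05Z</ends></event>
	fmt.Println(string(out))
}
```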
+func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { + if err := p.writeStart(&start); err != nil { + return err + } + text, err := val.MarshalText() + if err != nil { + return err + } + EscapeText(p, text) + return p.writeEnd(start.Name) +} + +// writeStart writes the given start element. +func (p *printer) writeStart(start *StartElement) error { + if start.Name.Local == "" { + return fmt.Errorf("xml: start tag with no name") + } + + p.tags = append(p.tags, start.Name) + p.markPrefix() + // Define any name spaces explicitly declared in the attributes. + // We do this as a separate pass so that explicitly declared prefixes + // will take precedence over implicitly declared prefixes + // regardless of the order of the attributes. + ignoreNonEmptyDefault := start.Name.Space == "" + for _, attr := range start.Attr { + if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil { + return err + } + } + // Define any new name spaces implied by the attributes. + for _, attr := range start.Attr { + name := attr.Name + // From http://www.w3.org/TR/xml-names11/#defaulting + // "Default namespace declarations do not apply directly + // to attribute names; the interpretation of unprefixed + // attributes is determined by the element on which they + // appear." + // This means we don't need to create a new namespace + // when an attribute name space is empty. + if name.Space != "" && !name.isNamespace() { + p.createNSPrefix(name.Space, true) + } + } + p.createNSPrefix(start.Name.Space, false) + + p.writeIndent(1) + p.WriteByte('<') + p.writeName(start.Name, false) + p.writeNamespaces() + for _, attr := range start.Attr { + name := attr.Name + if name.Local == "" || name.isNamespace() { + // Namespaces have already been written by writeNamespaces above. + continue + } + p.WriteByte(' ') + p.writeName(name, true) + p.WriteString(`="`) + p.EscapeString(attr.Value) + p.WriteByte('"') + } + p.WriteByte('>') + return nil +} + +// writeName writes the given name. It assumes +// that p.createNSPrefix(name) has already been called. 
+func (p *printer) writeName(name Name, isAttr bool) { + if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" { + p.WriteString(prefix) + p.WriteByte(':') + } + p.WriteString(name.Local) +} + +func (p *printer) writeEnd(name Name) error { + if name.Local == "" { + return fmt.Errorf("xml: end tag with no name") + } + if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" { + return fmt.Errorf("xml: end tag </%s> without start tag", name.Local) + } + if top := p.tags[len(p.tags)-1]; top != name { + if top.Local != name.Local { + return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local) + } + return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space) + } + p.tags = p.tags[:len(p.tags)-1] + + p.writeIndent(-1) + p.WriteByte('<') + p.WriteByte('/') + p.writeName(name, false) + p.WriteByte('>') + p.popPrefix() + return nil +} + +func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) { + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(val.Int(), 10), nil, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(val.Uint(), 10), nil, nil + case reflect.Float32, reflect.Float64: + return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil + case reflect.String: + return val.String(), nil, nil + case reflect.Bool: + return strconv.FormatBool(val.Bool()), nil, nil + case reflect.Array: + if typ.Elem().Kind() != reflect.Uint8 { + break + } + // [...]byte + var bytes []byte + if val.CanAddr() { + bytes = val.Slice(0, val.Len()).Bytes() + } else { + bytes = make([]byte, val.Len()) + reflect.Copy(reflect.ValueOf(bytes), val) + } + return "", bytes, nil + case reflect.Slice: + if typ.Elem().Kind() != reflect.Uint8 { + break + } + // []byte + return "", val.Bytes(), nil + } + return "", nil, &UnsupportedTypeError{typ} +} + +var ddBytes = []byte("--") + +func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { + s := parentStack{p: p} + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr != 0 { + continue + } + vf := finfo.value(val) + + // Dereference or skip nil pointer, interface values.
+ switch vf.Kind() { + case reflect.Ptr, reflect.Interface: + if !vf.IsNil() { + vf = vf.Elem() + } + } + + switch finfo.flags & fMode { + case fCharData: + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + if vf.CanInterface() && vf.Type().Implements(textMarshalerType) { + data, err := vf.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + Escape(p, data) + continue + } + if vf.CanAddr() { + pv := vf.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + data, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + Escape(p, data) + continue + } + } + var scratch [64]byte + switch vf.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)) + case reflect.Float32, reflect.Float64: + Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())) + case reflect.Bool: + Escape(p, strconv.AppendBool(scratch[:0], vf.Bool())) + case reflect.String: + if err := EscapeText(p, []byte(vf.String())); err != nil { + return err + } + case reflect.Slice: + if elem, ok := vf.Interface().([]byte); ok { + if err := EscapeText(p, elem); err != nil { + return err + } + } + } + continue + + case fComment: + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + k := vf.Kind() + if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) { + return fmt.Errorf("xml: bad type for comment field of %s", val.Type()) + } + if vf.Len() == 0 { + continue + } + p.writeIndent(0) + p.WriteString("" is invalid grammar. Make it "- -->" + p.WriteByte(' ') + } + p.WriteString("-->") + continue + + case fInnerXml: + iface := vf.Interface() + switch raw := iface.(type) { + case []byte: + p.Write(raw) + continue + case string: + p.WriteString(raw) + continue + } + + case fElement, fElement | fAny: + if err := s.setParents(finfo, vf); err != nil { + return err + } + } + if err := p.marshalValue(vf, finfo, nil); err != nil { + return err + } + } + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + return p.cachedWriteError() +} + +var noField fieldInfo + +// return the bufio Writer's cached write error +func (p *printer) cachedWriteError() error { + _, err := p.Write(nil) + return err +} + +func (p *printer) writeIndent(depthDelta int) { + if len(p.prefix) == 0 && len(p.indent) == 0 { + return + } + if depthDelta < 0 { + p.depth-- + if p.indentedIn { + p.indentedIn = false + return + } + p.indentedIn = false + } + if p.putNewline { + p.WriteByte('\n') + } else { + p.putNewline = true + } + if len(p.prefix) > 0 { + p.WriteString(p.prefix) + } + if len(p.indent) > 0 { + for i := 0; i < p.depth; i++ { + p.WriteString(p.indent) + } + } + if depthDelta > 0 { + p.depth++ + p.indentedIn = true + } +} + +type parentStack struct { + p *printer + xmlns string + parents []string +} + +// setParents sets the stack of current parents to those found in finfo. +// It only writes the start elements if vf holds a non-nil value. +// If finfo is &noField, it pops all elements. 
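The parentStack logic above is what turns "a>b"-style field tags into shared parent elements; a brief sketch with the standard encoding/xml package (the result/parent element names are invented for the example).

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// First and Second share the "parent" path, so they are wrapped in a single
// <parent> element; Other gets its own child element.
type result struct {
	XMLName xml.Name `xml:"result"`
	First   string   `xml:"parent>a"`
	Second  string   `xml:"parent>b"`
	Other   string   `xml:"c"`
}

func main() {
	out, _ := xml.Marshal(result{First: "1", Second: "2", Other: "3"})
	fmt.Println(string(out)) // <result><parent><a>1</a><b>2</b></parent><c>3</c></result>
}
```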
+func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error { + xmlns := s.p.defaultNS + if finfo.xmlns != "" { + xmlns = finfo.xmlns + } + commonParents := 0 + if xmlns == s.xmlns { + for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ { + if finfo.parents[commonParents] != s.parents[commonParents] { + break + } + } + } + // Pop off any parents that aren't in common with the previous field. + for i := len(s.parents) - 1; i >= commonParents; i-- { + if err := s.p.writeEnd(Name{ + Space: s.xmlns, + Local: s.parents[i], + }); err != nil { + return err + } + } + s.parents = finfo.parents + s.xmlns = xmlns + if commonParents >= len(s.parents) { + // No new elements to push. + return nil + } + if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() { + // The element is nil, so no need for the start elements. + s.parents = s.parents[:commonParents] + return nil + } + // Push any new parents required. + for _, name := range s.parents[commonParents:] { + start := &StartElement{ + Name: Name{ + Space: s.xmlns, + Local: name, + }, + } + // Set the default name space for parent elements + // to match what we do with other elements. + if s.xmlns != s.p.defaultNS { + start.setDefaultNamespace() + } + if err := s.p.writeStart(start); err != nil { + return err + } + } + return nil +} + +// A MarshalXMLError is returned when Marshal encounters a type +// that cannot be converted into XML. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "xml: unsupported type: " + e.Type.String() +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go new file mode 100644 index 000000000..226cfd013 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go @@ -0,0 +1,1939 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
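isEmptyValue above is what backs the ",omitempty" option: false, 0, nil pointers and interfaces, and zero-length arrays, slices, maps and strings are skipped. A small sketch with the standard encoding/xml package follows; the opts type and its fields are invented for illustration.

```go
package main

import (
	"encoding/xml"
	"fmt"
)

type opts struct {
	XMLName xml.Name `xml:"opts"`
	Retries int      `xml:"retries,omitempty"`
	Label   string   `xml:"label,omitempty"`
	Debug   bool     `xml:"debug,attr,omitempty"`
}

func main() {
	empty, _ := xml.Marshal(opts{})
	full, _ := xml.Marshal(opts{Retries: 3, Label: "x", Debug: true})
	fmt.Println(string(empty)) // <opts></opts>
	fmt.Println(string(full))  // <opts debug="true"><retries>3</retries><label>x</label></opts>
}
```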
+ +package xml + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +type DriveType int + +const ( + HyperDrive DriveType = iota + ImprobabilityDrive +) + +type Passenger struct { + Name []string `xml:"name"` + Weight float32 `xml:"weight"` +} + +type Ship struct { + XMLName struct{} `xml:"spaceship"` + + Name string `xml:"name,attr"` + Pilot string `xml:"pilot,attr"` + Drive DriveType `xml:"drive"` + Age uint `xml:"age"` + Passenger []*Passenger `xml:"passenger"` + secret string +} + +type NamedType string + +type Port struct { + XMLName struct{} `xml:"port"` + Type string `xml:"type,attr,omitempty"` + Comment string `xml:",comment"` + Number string `xml:",chardata"` +} + +type Domain struct { + XMLName struct{} `xml:"domain"` + Country string `xml:",attr,omitempty"` + Name []byte `xml:",chardata"` + Comment []byte `xml:",comment"` +} + +type Book struct { + XMLName struct{} `xml:"book"` + Title string `xml:",chardata"` +} + +type Event struct { + XMLName struct{} `xml:"event"` + Year int `xml:",chardata"` +} + +type Movie struct { + XMLName struct{} `xml:"movie"` + Length uint `xml:",chardata"` +} + +type Pi struct { + XMLName struct{} `xml:"pi"` + Approximation float32 `xml:",chardata"` +} + +type Universe struct { + XMLName struct{} `xml:"universe"` + Visible float64 `xml:",chardata"` +} + +type Particle struct { + XMLName struct{} `xml:"particle"` + HasMass bool `xml:",chardata"` +} + +type Departure struct { + XMLName struct{} `xml:"departure"` + When time.Time `xml:",chardata"` +} + +type SecretAgent struct { + XMLName struct{} `xml:"agent"` + Handle string `xml:"handle,attr"` + Identity string + Obfuscate string `xml:",innerxml"` +} + +type NestedItems struct { + XMLName struct{} `xml:"result"` + Items []string `xml:">item"` + Item1 []string `xml:"Items>item1"` +} + +type NestedOrder struct { + XMLName struct{} `xml:"result"` + Field1 string `xml:"parent>c"` + Field2 string `xml:"parent>b"` + Field3 string `xml:"parent>a"` +} + +type MixedNested struct { + XMLName struct{} `xml:"result"` + A string `xml:"parent1>a"` + B string `xml:"b"` + C string `xml:"parent1>parent2>c"` + D string `xml:"parent1>d"` +} + +type NilTest struct { + A interface{} `xml:"parent1>parent2>a"` + B interface{} `xml:"parent1>b"` + C interface{} `xml:"parent1>parent2>c"` +} + +type Service struct { + XMLName struct{} `xml:"service"` + Domain *Domain `xml:"host>domain"` + Port *Port `xml:"host>port"` + Extra1 interface{} + Extra2 interface{} `xml:"host>extra2"` +} + +var nilStruct *Ship + +type EmbedA struct { + EmbedC + EmbedB EmbedB + FieldA string +} + +type EmbedB struct { + FieldB string + *EmbedC +} + +type EmbedC struct { + FieldA1 string `xml:"FieldA>A1"` + FieldA2 string `xml:"FieldA>A2"` + FieldB string + FieldC string +} + +type NameCasing struct { + XMLName struct{} `xml:"casing"` + Xy string + XY string + XyA string `xml:"Xy,attr"` + XYA string `xml:"XY,attr"` +} + +type NamePrecedence struct { + XMLName Name `xml:"Parent"` + FromTag XMLNameWithoutTag `xml:"InTag"` + FromNameVal XMLNameWithoutTag + FromNameTag XMLNameWithTag + InFieldName string +} + +type XMLNameWithTag struct { + XMLName Name `xml:"InXMLNameTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithNSTag struct { + XMLName Name `xml:"ns InXMLNameWithNSTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithoutTag struct { + XMLName Name + Value string `xml:",chardata"` +} + +type NameInField struct { + Foo Name `xml:"ns foo"` +} + +type AttrTest struct { + 
Int int `xml:",attr"` + Named int `xml:"int,attr"` + Float float64 `xml:",attr"` + Uint8 uint8 `xml:",attr"` + Bool bool `xml:",attr"` + Str string `xml:",attr"` + Bytes []byte `xml:",attr"` +} + +type OmitAttrTest struct { + Int int `xml:",attr,omitempty"` + Named int `xml:"int,attr,omitempty"` + Float float64 `xml:",attr,omitempty"` + Uint8 uint8 `xml:",attr,omitempty"` + Bool bool `xml:",attr,omitempty"` + Str string `xml:",attr,omitempty"` + Bytes []byte `xml:",attr,omitempty"` +} + +type OmitFieldTest struct { + Int int `xml:",omitempty"` + Named int `xml:"int,omitempty"` + Float float64 `xml:",omitempty"` + Uint8 uint8 `xml:",omitempty"` + Bool bool `xml:",omitempty"` + Str string `xml:",omitempty"` + Bytes []byte `xml:",omitempty"` + Ptr *PresenceTest `xml:",omitempty"` +} + +type AnyTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField AnyHolder `xml:",any"` +} + +type AnyOmitTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField *AnyHolder `xml:",any,omitempty"` +} + +type AnySliceTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField []AnyHolder `xml:",any"` +} + +type AnyHolder struct { + XMLName Name + XML string `xml:",innerxml"` +} + +type RecurseA struct { + A string + B *RecurseB +} + +type RecurseB struct { + A *RecurseA + B string +} + +type PresenceTest struct { + Exists *struct{} +} + +type IgnoreTest struct { + PublicSecret string `xml:"-"` +} + +type MyBytes []byte + +type Data struct { + Bytes []byte + Attr []byte `xml:",attr"` + Custom MyBytes +} + +type Plain struct { + V interface{} +} + +type MyInt int + +type EmbedInt struct { + MyInt +} + +type Strings struct { + X []string `xml:"A>B,omitempty"` +} + +type PointerFieldsTest struct { + XMLName Name `xml:"dummy"` + Name *string `xml:"name,attr"` + Age *uint `xml:"age,attr"` + Empty *string `xml:"empty,attr"` + Contents *string `xml:",chardata"` +} + +type ChardataEmptyTest struct { + XMLName Name `xml:"test"` + Contents *string `xml:",chardata"` +} + +type MyMarshalerTest struct { +} + +var _ Marshaler = (*MyMarshalerTest)(nil) + +func (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error { + e.EncodeToken(start) + e.EncodeToken(CharData([]byte("hello world"))) + e.EncodeToken(EndElement{start.Name}) + return nil +} + +type MyMarshalerAttrTest struct{} + +var _ MarshalerAttr = (*MyMarshalerAttrTest)(nil) + +func (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MyMarshalerValueAttrTest struct{} + +var _ MarshalerAttr = MyMarshalerValueAttrTest{} + +func (m MyMarshalerValueAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MarshalerStruct struct { + Foo MyMarshalerAttrTest `xml:",attr"` +} + +type MarshalerValueStruct struct { + Foo MyMarshalerValueAttrTest `xml:",attr"` +} + +type InnerStruct struct { + XMLName Name `xml:"testns outer"` +} + +type OuterStruct struct { + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterNamedStruct struct { + InnerStruct + XMLName Name `xml:"outerns test"` + IntAttr int `xml:"int,attr"` +} + +type OuterNamedOrderedStruct struct { + XMLName Name `xml:"outerns test"` + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterOuterStruct struct { + OuterStruct +} + +type NestedAndChardata struct { + AB []string `xml:"A>B"` + Chardata string `xml:",chardata"` +} + +type NestedAndComment struct { + AB []string `xml:"A>B"` + Comment 
string `xml:",comment"` +} + +type XMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body string +} + +type NamedXMLNSFieldStruct struct { + XMLName struct{} `xml:"testns test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type XMLNSFieldStructWithOmitEmpty struct { + Ns string `xml:"xmlns,attr,omitempty"` + Body string +} + +type NamedXMLNSFieldStructWithEmptyNamespace struct { + XMLName struct{} `xml:"test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type RecursiveXMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body *RecursiveXMLNSFieldStruct `xml:",omitempty"` + Text string `xml:",omitempty"` +} + +func ifaceptr(x interface{}) interface{} { + return &x +} + +var ( + nameAttr = "Sarah" + ageAttr = uint(12) + contentsAttr = "lorem ipsum" +) + +// Unless explicitly stated as such (or *Plain), all of the +// tests below are two-way tests. When introducing new tests, +// please try to make them two-way as well to ensure that +// marshalling and unmarshalling are as symmetrical as feasible. +var marshalTests = []struct { + Value interface{} + ExpectXML string + MarshalOnly bool + UnmarshalOnly bool +}{ + // Test nil marshals to nothing + {Value: nil, ExpectXML: ``, MarshalOnly: true}, + {Value: nilStruct, ExpectXML: ``, MarshalOnly: true}, + + // Test value types + {Value: &Plain{true}, ExpectXML: `true`}, + {Value: &Plain{false}, ExpectXML: `false`}, + {Value: &Plain{int(42)}, ExpectXML: `42`}, + {Value: &Plain{int8(42)}, ExpectXML: `42`}, + {Value: &Plain{int16(42)}, ExpectXML: `42`}, + {Value: &Plain{int32(42)}, ExpectXML: `42`}, + {Value: &Plain{uint(42)}, ExpectXML: `42`}, + {Value: &Plain{uint8(42)}, ExpectXML: `42`}, + {Value: &Plain{uint16(42)}, ExpectXML: `42`}, + {Value: &Plain{uint32(42)}, ExpectXML: `42`}, + {Value: &Plain{float32(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{float64(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `65501`}, + {Value: &Plain{"gopher"}, ExpectXML: `gopher`}, + {Value: &Plain{[]byte("gopher")}, ExpectXML: `gopher`}, + {Value: &Plain{""}, ExpectXML: `</>`}, + {Value: &Plain{[]byte("")}, ExpectXML: `</>`}, + {Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `</>`}, + {Value: &Plain{NamedType("potato")}, ExpectXML: `potato`}, + {Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: &Plain{[3]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `true`}, + + // Test time. + { + Value: &Plain{time.Unix(1e9, 123456789).UTC()}, + ExpectXML: `2001-09-09T01:46:40.123456789Z`, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A []byte field is only nil if the element was not found. + { + Value: &Data{}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + { + Value: &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Check that []byte works, including named []byte types. 
+ { + Value: &Data{Bytes: []byte("ab"), Custom: MyBytes("cd"), Attr: []byte{'v'}}, + ExpectXML: `abcd`, + }, + + // Test innerxml + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + ExpectXML: `James Bond`, + MarshalOnly: true, + }, + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "James Bond", + }, + ExpectXML: `James Bond`, + UnmarshalOnly: true, + }, + + // Test structs + {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Type: ""}, ExpectXML: ``}, + {Value: &Port{Number: "443", Comment: "https"}, ExpectXML: `443`}, + {Value: &Port{Number: "443", Comment: "add space-"}, ExpectXML: `443`, MarshalOnly: true}, + {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `google.com&friends`}, + {Value: &Domain{Name: []byte("google.com"), Comment: []byte(" &friends ")}, ExpectXML: `google.com`}, + {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `Pride & Prejudice`}, + {Value: &Event{Year: -3114}, ExpectXML: `-3114`}, + {Value: &Movie{Length: 13440}, ExpectXML: `13440`}, + {Value: &Pi{Approximation: 3.14159265}, ExpectXML: `3.1415927`}, + {Value: &Universe{Visible: 9.3e13}, ExpectXML: `9.3e+13`}, + {Value: &Particle{HasMass: true}, ExpectXML: `true`}, + {Value: &Departure{When: ParseTime("2013-01-09T00:15:00-09:00")}, ExpectXML: `2013-01-09T00:15:00-09:00`}, + {Value: atomValue, ExpectXML: atomXml}, + { + Value: &Ship{ + Name: "Heart of Gold", + Pilot: "Computer", + Age: 1, + Drive: ImprobabilityDrive, + Passenger: []*Passenger{ + { + Name: []string{"Zaphod", "Beeblebrox"}, + Weight: 7.25, + }, + { + Name: []string{"Trisha", "McMillen"}, + Weight: 5.5, + }, + { + Name: []string{"Ford", "Prefect"}, + Weight: 7, + }, + { + Name: []string{"Arthur", "Dent"}, + Weight: 6.75, + }, + }, + }, + ExpectXML: `` + + `` + strconv.Itoa(int(ImprobabilityDrive)) + `` + + `1` + + `` + + `Zaphod` + + `Beeblebrox` + + `7.25` + + `` + + `` + + `Trisha` + + `McMillen` + + `5.5` + + `` + + `` + + `Ford` + + `Prefect` + + `7` + + `` + + `` + + `Arthur` + + `Dent` + + `6.75` + + `` + + ``, + }, + + // Test a>b + { + Value: &NestedItems{Items: nil, Item1: nil}, + ExpectXML: `` + + `` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{}, Item1: []string{}}, + ExpectXML: `` + + `` + + `` + + ``, + MarshalOnly: true, + }, + { + Value: &NestedItems{Items: nil, Item1: []string{"A"}}, + ExpectXML: `` + + `` + + `A` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: nil}, + ExpectXML: `` + + `` + + `A` + + `B` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}}, + ExpectXML: `` + + `` + + `A` + + `B` + + `C` + + `` + + ``, + }, + { + Value: &NestedOrder{Field1: "C", Field2: "B", Field3: "A"}, + ExpectXML: `` + + `` + + `C` + + `B` + + `A` + + `` + + ``, + }, + { + Value: &NilTest{A: "A", B: nil, C: "C"}, + ExpectXML: `` + + `` + + `A` + + `C` + + `` + + ``, + MarshalOnly: true, // Uses interface{} + }, + { + Value: &MixedNested{A: "A", B: "B", C: "C", D: "D"}, + ExpectXML: `` + + `A` + + `B` + + `` + + `C` + + `D` + + `` + + ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}}, + ExpectXML: `80`, + }, + { + Value: &Service{}, + ExpectXML: ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"}, + ExpectXML: `` + + `80` + + `A` + + `B` + + ``, + MarshalOnly: true, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra2: "example"}, + 
ExpectXML: `` + + `80` + + `example` + + ``, + MarshalOnly: true, + }, + { + Value: &struct { + XMLName struct{} `xml:"space top"` + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + E1 string `xml:"x>e"` + }{ + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + E1: "e1", + }, + ExpectXML: `` + + `abc` + + `` + + `c1` + + `d1` + + `` + + `` + + `e1` + + `` + + ``, + }, + { + Value: &struct { + XMLName Name + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + }{ + XMLName: Name{ + Space: "space0", + Local: "top", + }, + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + }, + ExpectXML: `` + + `ab` + + `c` + + `` + + `c1` + + `d1` + + `` + + ``, + }, + { + Value: &struct { + XMLName struct{} `xml:"top"` + B string `xml:"space x>b"` + B1 string `xml:"space1 x>b"` + }{ + B: "b", + B1: "b1", + }, + ExpectXML: `` + + `b` + + `b1` + + ``, + }, + + // Test struct embedding + { + Value: &EmbedA{ + EmbedC: EmbedC{ + FieldA1: "", // Shadowed by A.A + FieldA2: "", // Shadowed by A.A + FieldB: "A.C.B", + FieldC: "A.C.C", + }, + EmbedB: EmbedB{ + FieldB: "A.B.B", + EmbedC: &EmbedC{ + FieldA1: "A.B.C.A1", + FieldA2: "A.B.C.A2", + FieldB: "", // Shadowed by A.B.B + FieldC: "A.B.C.C", + }, + }, + FieldA: "A.A", + }, + ExpectXML: `` + + `A.C.B` + + `A.C.C` + + `` + + `A.B.B` + + `` + + `A.B.C.A1` + + `A.B.C.A2` + + `` + + `A.B.C.C` + + `` + + `A.A` + + ``, + }, + + // Test that name casing matters + { + Value: &NameCasing{Xy: "mixed", XY: "upper", XyA: "mixedA", XYA: "upperA"}, + ExpectXML: `mixedupper`, + }, + + // Test the order in which the XML element name is chosen + { + Value: &NamePrecedence{ + FromTag: XMLNameWithoutTag{Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "InXMLName"}, Value: "B"}, + FromNameTag: XMLNameWithTag{Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + MarshalOnly: true, + }, + { + Value: &NamePrecedence{ + XMLName: Name{Local: "Parent"}, + FromTag: XMLNameWithoutTag{XMLName: Name{Local: "InTag"}, Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "FromNameVal"}, Value: "B"}, + FromNameTag: XMLNameWithTag{XMLName: Name{Local: "InXMLNameTag"}, Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + UnmarshalOnly: true, + }, + + // xml.Name works in a plain field as well. + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + }, + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Marshaling zero xml.Name uses the tag or field name. 
+ { + Value: &NameInField{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // Test attributes + { + Value: &AttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &AttrTest{Bytes: []byte{}}, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{}, + ExpectXML: ``, + }, + + // pointer fields + { + Value: &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr}, + ExpectXML: `lorem ipsum`, + MarshalOnly: true, + }, + + // empty chardata pointer field + { + Value: &ChardataEmptyTest{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // omitempty on fields + { + Value: &OmitFieldTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + Ptr: &PresenceTest{}, + }, + ExpectXML: `` + + `8` + + `9` + + `23.5` + + `255` + + `true` + + `str` + + `byt` + + `` + + ``, + }, + { + Value: &OmitFieldTest{}, + ExpectXML: ``, + }, + + // Test ",any" + { + ExpectXML: `knownunknown`, + Value: &AnyTest{ + Nested: "known", + AnyField: AnyHolder{ + XMLName: Name{Local: "other"}, + XML: "unknown", + }, + }, + }, + { + Value: &AnyTest{Nested: "known", + AnyField: AnyHolder{ + XML: "", + XMLName: Name{Local: "AnyField"}, + }, + }, + ExpectXML: `known`, + }, + { + ExpectXML: `b`, + Value: &AnyOmitTest{ + Nested: "b", + }, + }, + { + ExpectXML: `bei`, + Value: &AnySliceTest{ + Nested: "b", + AnyField: []AnyHolder{ + { + XMLName: Name{Local: "c"}, + XML: "e", + }, + { + XMLName: Name{Space: "f", Local: "g"}, + XML: "i", + }, + }, + }, + }, + { + ExpectXML: `b`, + Value: &AnySliceTest{ + Nested: "b", + }, + }, + + // Test recursive types. + { + Value: &RecurseA{ + A: "a1", + B: &RecurseB{ + A: &RecurseA{"a2", nil}, + B: "b1", + }, + }, + ExpectXML: `a1a2b1`, + }, + + // Test ignoring fields via "-" tag + { + ExpectXML: ``, + Value: &IgnoreTest{}, + }, + { + ExpectXML: ``, + Value: &IgnoreTest{PublicSecret: "can't tell"}, + MarshalOnly: true, + }, + { + ExpectXML: `ignore me`, + Value: &IgnoreTest{}, + UnmarshalOnly: true, + }, + + // Test escaping. + { + ExpectXML: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + Value: &AnyTest{ + Nested: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + AnyField: AnyHolder{XMLName: Name{Local: "empty"}}, + }, + }, + { + ExpectXML: `newline: ; cr: ; tab: ;`, + Value: &AnyTest{ + Nested: "newline: \n; cr: \r; tab: \t;", + AnyField: AnyHolder{XMLName: Name{Local: "AnyField"}}, + }, + }, + { + ExpectXML: "1\r2\r\n3\n\r4\n5", + Value: &AnyTest{ + Nested: "1\n2\n3\n\n4\n5", + }, + UnmarshalOnly: true, + }, + { + ExpectXML: `42`, + Value: &EmbedInt{ + MyInt: 42, + }, + }, + // Test omitempty with parent chain; see golang.org/issue/4168. + { + ExpectXML: ``, + Value: &Strings{}, + }, + // Custom marshalers. 
+ { + ExpectXML: `hello world`, + Value: &MyMarshalerTest{}, + }, + { + ExpectXML: ``, + Value: &MarshalerStruct{}, + }, + { + ExpectXML: ``, + Value: &MarshalerValueStruct{}, + }, + { + ExpectXML: ``, + Value: &OuterStruct{IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedOrderedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterOuterStruct{OuterStruct{IntAttr: 10}}, + }, + { + ExpectXML: `test`, + Value: &NestedAndChardata{AB: make([]string, 2), Chardata: "test"}, + }, + { + ExpectXML: ``, + Value: &NestedAndComment{AB: make([]string, 2), Comment: "test"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStructWithOmitEmpty{Body: "hello world"}, + }, + { + // The xmlns attribute must be ignored because the + // element is in the empty namespace, so it's not possible + // to set the default namespace to something non-empty. + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStructWithEmptyNamespace{Ns: "foo", Body: "hello world"}, + MarshalOnly: true, + }, + { + ExpectXML: `hello world`, + Value: &RecursiveXMLNSFieldStruct{ + Ns: "foo", + Body: &RecursiveXMLNSFieldStruct{ + Text: "hello world", + }, + }, + }, +} + +func TestMarshal(t *testing.T) { + for idx, test := range marshalTests { + if test.UnmarshalOnly { + continue + } + data, err := Marshal(test.Value) + if err != nil { + t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + if strings.Contains(want, "\n") { + t.Errorf("#%d: marshal(%#v):\nHAVE:\n%s\nWANT:\n%s", idx, test.Value, got, want) + } else { + t.Errorf("#%d: marshal(%#v):\nhave %#q\nwant %#q", idx, test.Value, got, want) + } + } + } +} + +type AttrParent struct { + X string `xml:"X>Y,attr"` +} + +type BadAttr struct { + Name []string `xml:"name,attr"` +} + +var marshalErrorTests = []struct { + Value interface{} + Err string + Kind reflect.Kind +}{ + { + Value: make(chan bool), + Err: "xml: unsupported type: chan bool", + Kind: reflect.Chan, + }, + { + Value: map[string]string{ + "question": "What do you get when you multiply six by nine?", + "answer": "42", + }, + Err: "xml: unsupported type: map[string]string", + Kind: reflect.Map, + }, + { + Value: map[*Ship]bool{nil: false}, + Err: "xml: unsupported type: map[*xml.Ship]bool", + Kind: reflect.Map, + }, + { + Value: &Domain{Comment: []byte("f--bar")}, + Err: `xml: comments must not contain "--"`, + }, + // Reject parent chain with attr, never worked; see golang.org/issue/5033. 
+ { + Value: &AttrParent{}, + Err: `xml: X>Y chain not valid with attr flag`, + }, + { + Value: BadAttr{[]string{"X", "Y"}}, + Err: `xml: unsupported type: []string`, + }, +} + +var marshalIndentTests = []struct { + Value interface{} + Prefix string + Indent string + ExpectXML string +}{ + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + Prefix: "", + Indent: "\t", + ExpectXML: fmt.Sprintf("\n\tJames Bond\n"), + }, +} + +func TestMarshalErrors(t *testing.T) { + for idx, test := range marshalErrorTests { + data, err := Marshal(test.Value) + if err == nil { + t.Errorf("#%d: marshal(%#v) = [success] %q, want error %v", idx, test.Value, data, test.Err) + continue + } + if err.Error() != test.Err { + t.Errorf("#%d: marshal(%#v) = [error] %v, want %v", idx, test.Value, err, test.Err) + } + if test.Kind != reflect.Invalid { + if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind { + t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind) + } + } + } +} + +// Do invertibility testing on the various structures that we test +func TestUnmarshal(t *testing.T) { + for i, test := range marshalTests { + if test.MarshalOnly { + continue + } + if _, ok := test.Value.(*Plain); ok { + continue + } + vt := reflect.TypeOf(test.Value) + dest := reflect.New(vt.Elem()).Interface() + err := Unmarshal([]byte(test.ExpectXML), dest) + + switch fix := dest.(type) { + case *Feed: + fix.Author.InnerXML = "" + for i := range fix.Entry { + fix.Entry[i].Author.InnerXML = "" + } + } + + if err != nil { + t.Errorf("#%d: unexpected error: %#v", i, err) + } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) { + t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want) + } + } +} + +func TestMarshalIndent(t *testing.T) { + for i, test := range marshalIndentTests { + data, err := MarshalIndent(test.Value, test.Prefix, test.Indent) + if err != nil { + t.Errorf("#%d: Error: %s", i, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + t.Errorf("#%d: MarshalIndent:\nGot:%s\nWant:\n%s", i, got, want) + } + } +} + +type limitedBytesWriter struct { + w io.Writer + remain int // until writes fail +} + +func (lw *limitedBytesWriter) Write(p []byte) (n int, err error) { + if lw.remain <= 0 { + println("error") + return 0, errors.New("write limit hit") + } + if len(p) > lw.remain { + p = p[:lw.remain] + n, _ = lw.w.Write(p) + lw.remain = 0 + return n, errors.New("write limit hit") + } + n, err = lw.w.Write(p) + lw.remain -= n + return n, err +} + +func TestMarshalWriteErrors(t *testing.T) { + var buf bytes.Buffer + const writeCap = 1024 + w := &limitedBytesWriter{&buf, writeCap} + enc := NewEncoder(w) + var err error + var i int + const n = 4000 + for i = 1; i <= n; i++ { + err = enc.Encode(&Passenger{ + Name: []string{"Alice", "Bob"}, + Weight: 5, + }) + if err != nil { + break + } + } + if err == nil { + t.Error("expected an error") + } + if i == n { + t.Errorf("expected to fail before the end") + } + if buf.Len() != writeCap { + t.Errorf("buf.Len() = %d; want %d", buf.Len(), writeCap) + } +} + +func TestMarshalWriteIOErrors(t *testing.T) { + enc := NewEncoder(errWriter{}) + + expectErr := "unwritable" + err := enc.Encode(&Passenger{}) + if err == nil || err.Error() != expectErr { + t.Errorf("EscapeTest = [error] %v, want %v", err, expectErr) + } +} + +func TestMarshalFlush(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := 
enc.EncodeToken(CharData("hello world")); err != nil { + t.Fatalf("enc.EncodeToken: %v", err) + } + if buf.Len() > 0 { + t.Fatalf("enc.EncodeToken caused actual write: %q", buf.Bytes()) + } + if err := enc.Flush(); err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if buf.String() != "hello world" { + t.Fatalf("after enc.Flush, buf.String() = %q, want %q", buf.String(), "hello world") + } +} + +var encodeElementTests = []struct { + desc string + value interface{} + start StartElement + expectXML string +}{{ + desc: "simple string", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + }, + expectXML: `hello`, +}, { + desc: "string with added attributes", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "x"}, + Value: "y", + }, { + Name: Name{Local: "foo"}, + Value: "bar", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element with default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element in name space with different default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "XMLMarshaler with start element with default name space", + value: &MyMarshalerTest{}, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello world`, +}} + +func TestEncodeElement(t *testing.T) { + for idx, test := range encodeElementTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + err := enc.EncodeElement(test.value, test.start) + if err != nil { + t.Fatalf("enc.EncodeElement: %v", err) + } + err = enc.Flush() + if err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if got, want := buf.String(), test.expectXML; got != want { + t.Errorf("#%d(%s): EncodeElement(%#v, %#v):\nhave %#q\nwant %#q", idx, test.desc, test.value, test.start, got, want) + } + } +} + +func BenchmarkMarshal(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + Marshal(atomValue) + } +} + +func BenchmarkUnmarshal(b *testing.B) { + b.ReportAllocs() + xml := []byte(atomXml) + for i := 0; i < b.N; i++ { + Unmarshal(xml, &Feed{}) + } +} + +// golang.org/issue/6556 +func TestStructPointerMarshal(t *testing.T) { + type A struct { + XMLName string `xml:"a"` + B []interface{} + } + type C struct { + XMLName Name + Value string `xml:"value"` + } + + a := new(A) + a.B = append(a.B, &C{ + XMLName: Name{Local: "c"}, + Value: "x", + }) + + b, err := Marshal(a) + if err != nil { + t.Fatal(err) + } + if x := string(b); x != "x" { + t.Fatal(x) + } + var v A + err = Unmarshal(b, &v) + if err != nil { + t.Fatal(err) + } +} + +var encodeTokenTests = []struct { + desc string + toks []Token + want string + err string +}{{ + desc: "start element with name space", + toks: []Token{ + StartElement{Name{"space", "local"}, nil}, + }, + want: ``, +}, { + desc: "start element with no name", + toks: []Token{ + StartElement{Name{"space", ""}, 
nil}, + }, + err: "xml: start tag with no name", +}, { + desc: "end element with no name", + toks: []Token{ + EndElement{Name{"space", ""}}, + }, + err: "xml: end tag with no name", +}, { + desc: "char data", + toks: []Token{ + CharData("foo"), + }, + want: `foo`, +}, { + desc: "char data with escaped chars", + toks: []Token{ + CharData(" \t\n"), + }, + want: " \n", +}, { + desc: "comment", + toks: []Token{ + Comment("foo"), + }, + want: ``, +}, { + desc: "comment with invalid content", + toks: []Token{ + Comment("foo-->"), + }, + err: "xml: EncodeToken of Comment containing --> marker", +}, { + desc: "proc instruction", + toks: []Token{ + ProcInst{"Target", []byte("Instruction")}, + }, + want: ``, +}, { + desc: "proc instruction with empty target", + toks: []Token{ + ProcInst{"", []byte("Instruction")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "proc instruction with bad content", + toks: []Token{ + ProcInst{"", []byte("Instruction?>")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "directive", + toks: []Token{ + Directive("foo"), + }, + want: ``, +}, { + desc: "more complex directive", + toks: []Token{ + Directive("DOCTYPE doc [ '> ]"), + }, + want: `'> ]>`, +}, { + desc: "directive instruction with bad name", + toks: []Token{ + Directive("foo>"), + }, + err: "xml: EncodeToken of Directive containing wrong < or > markers", +}, { + desc: "end tag without start tag", + toks: []Token{ + EndElement{Name{"foo", "bar"}}, + }, + err: "xml: end tag without start tag", +}, { + desc: "mismatching end tag local name", + toks: []Token{ + StartElement{Name{"", "foo"}, nil}, + EndElement{Name{"", "bar"}}, + }, + err: "xml: end tag does not match start tag ", + want: ``, +}, { + desc: "mismatching end tag namespace", + toks: []Token{ + StartElement{Name{"space", "foo"}, nil}, + EndElement{Name{"another", "foo"}}, + }, + err: "xml: end tag in namespace another does not match start tag in namespace space", + want: ``, +}, { + desc: "start element with explicit namespace", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "start element with explicit namespace and colliding prefix", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + {Name{"x", "bar"}, "other"}, + }}, + }, + want: ``, +}, { + desc: "start element using previously defined namespace", + toks: []Token{ + StartElement{Name{"", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"space", "x"}, "y"}, + }}, + }, + want: ``, +}, { + desc: "nested name space with same prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space1"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space2"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + EndElement{Name{"", "foo"}}, + EndElement{Name{"", "foo"}}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + }, + want: ``, +}, { + desc: "start element defining several prefixes for the same name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "a"}, "space"}, + {Name{"xmlns", "b"}, "space"}, + {Name{"space", "x"}, "value"}, + }}, + 
}, + want: ``, +}, { + desc: "nested element redefines name space", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element creates alias for default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element defines default name space with existing prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element uses empty attribute name space when default ns defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "redefine xmlns", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"foo", "xmlns"}, "space"}, + }}, + }, + err: `xml: cannot redefine xmlns attribute prefix`, +}, { + desc: "xmlns with explicit name space #1", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xml", "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "xmlns with explicit name space #2", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{xmlURL, "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "empty name space declaration is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "foo"}, ""}, + }}, + }, + want: ``, +}, { + desc: "attribute with no name is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", ""}, "value"}, + }}, + }, + want: ``, +}, { + desc: "namespace URL with non-valid name", + toks: []Token{ + StartElement{Name{"/34", "foo"}, []Attr{ + {Name{"/34", "x"}, "value"}, + }}, + }, + want: `<_:foo xmlns:_="/34" _:x="value">`, +}, { + desc: "nested element resets default namespace to empty", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, ""}, + {Name{"", "x"}, "value"}, + {Name{"space", "x"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element requires empty default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, nil}, + }, + want: ``, +}, { + desc: "attribute uses name space from xmlns", + toks: []Token{ + StartElement{Name{"some/space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + {Name{"some/space", "other"}, "other value"}, + }}, + }, + want: ``, +}, { + desc: "default name space should not be used by attributes", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"xmlns", "bar"}, "space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "default name space not used by attributes, not explicitly defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, 
"space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "impossible xmlns declaration", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "bar"}, []Attr{ + {Name{"space", "attr"}, "value"}, + }}, + }, + want: ``, +}} + +func TestEncodeToken(t *testing.T) { +loop: + for i, tt := range encodeTokenTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + var err error + for j, tok := range tt.toks { + err = enc.EncodeToken(tok) + if err != nil && j < len(tt.toks)-1 { + t.Errorf("#%d %s token #%d: %v", i, tt.desc, j, err) + continue loop + } + } + errorf := func(f string, a ...interface{}) { + t.Errorf("#%d %s token #%d:%s", i, tt.desc, len(tt.toks)-1, fmt.Sprintf(f, a...)) + } + switch { + case tt.err != "" && err == nil: + errorf(" expected error; got none") + continue + case tt.err == "" && err != nil: + errorf(" got error: %v", err) + continue + case tt.err != "" && err != nil && tt.err != err.Error(): + errorf(" error mismatch; got %v, want %v", err, tt.err) + continue + } + if err := enc.Flush(); err != nil { + errorf(" %v", err) + continue + } + if got := buf.String(); got != tt.want { + errorf("\ngot %v\nwant %v", got, tt.want) + continue + } + } +} + +func TestProcInstEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s", err) + } + + if err := enc.EncodeToken(ProcInst{"Target", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to add non-xml target ProcInst") + } + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err == nil { + t.Fatalf("enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token") + } +} + +func TestDecodeEncode(t *testing.T) { + var in, out bytes.Buffer + in.WriteString(` + + + +`) + dec := NewDecoder(&in) + enc := NewEncoder(&out) + for tok, err := dec.Token(); err == nil; tok, err = dec.Token() { + err = enc.EncodeToken(tok) + if err != nil { + t.Fatalf("enc.EncodeToken: Unable to encode token (%#v), %v", tok, err) + } + } +} + +// Issue 9796. Used to fail with GORACE="halt_on_error=1" -race. +func TestRace9796(t *testing.T) { + type A struct{} + type B struct { + C []A `xml:"X>Y"` + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + Marshal(B{[]A{{}}}) + wg.Done() + }() + } + wg.Wait() +} + +func TestIsValidDirective(t *testing.T) { + testOK := []string{ + "<>", + "< < > >", + "' '>' >", + " ]>", + " '<' ' doc ANY> ]>", + ">>> a < comment --> [ ] >", + } + testKO := []string{ + "<", + ">", + "", + "< > > < < >", + " -->", + "", + "'", + "", + } + for _, s := range testOK { + if !isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be valid", s) + } + } + for _, s := range testKO { + if isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be invalid", s) + } + } +} + +// Issue 11719. EncodeToken used to silently eat tokens with an invalid type. 
+func TestSimpleUseOfEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := enc.EncodeToken(&StartElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(&EndElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(StartElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: StartElement %s", err) + } + if err := enc.EncodeToken(EndElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: EndElement %s", err) + } + if err := enc.EncodeToken(Universe{}); err == nil { + t.Errorf("enc.EncodeToken: invalid type not caught") + } + if err := enc.Flush(); err != nil { + t.Errorf("enc.Flush: %s", err) + } + if buf.Len() == 0 { + t.Errorf("enc.EncodeToken: empty buffer") + } + want := "" + if buf.String() != want { + t.Errorf("enc.EncodeToken: expected %q; got %q", want, buf.String()) + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read.go b/vendor/golang.org/x/net/webdav/internal/xml/read.go new file mode 100644 index 000000000..4089056a1 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read.go @@ -0,0 +1,692 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +// BUG(rsc): Mapping between XML elements and data structures is inherently flawed: +// an XML element is an order-dependent collection of anonymous +// values, while a data structure is an order-independent collection +// of named values. +// See package json for a textual representation more suitable +// to data structures. + +// Unmarshal parses the XML-encoded data and stores the result in +// the value pointed to by v, which must be an arbitrary struct, +// slice, or string. Well-formed data that does not fit into v is +// discarded. +// +// Because Unmarshal uses the reflect package, it can only assign +// to exported (upper case) fields. Unmarshal uses a case-sensitive +// comparison to match XML element names to tag values and struct +// field names. +// +// Unmarshal maps an XML element to a struct using the following rules. +// In the rules, the tag of a field refers to the value associated with the +// key 'xml' in the struct field's tag (see the example above). +// +// * If the struct has a field of type []byte or string with tag +// ",innerxml", Unmarshal accumulates the raw XML nested inside the +// element in that field. The rest of the rules still apply. +// +// * If the struct has a field named XMLName of type xml.Name, +// Unmarshal records the element name in that field. +// +// * If the XMLName field has an associated tag of the form +// "name" or "namespace-URL name", the XML element must have +// the given name (and, optionally, name space) or else Unmarshal +// returns an error. +// +// * If the XML element has an attribute whose name matches a +// struct field name with an associated tag containing ",attr" or +// the explicit name in a struct field tag of the form "name,attr", +// Unmarshal records the attribute value in that field. +// +// * If the XML element contains character data, that data is +// accumulated in the first struct field that has tag ",chardata". +// The struct field may have type []byte or string. 
+// If there is no such field, the character data is discarded. +// +// * If the XML element contains comments, they are accumulated in +// the first struct field that has tag ",comment". The struct +// field may have type []byte or string. If there is no such +// field, the comments are discarded. +// +// * If the XML element contains a sub-element whose name matches +// the prefix of a tag formatted as "a" or "a>b>c", unmarshal +// will descend into the XML structure looking for elements with the +// given names, and will map the innermost elements to that struct +// field. A tag starting with ">" is equivalent to one starting +// with the field name followed by ">". +// +// * If the XML element contains a sub-element whose name matches +// a struct field's XMLName tag and the struct field has no +// explicit name tag as per the previous rule, unmarshal maps +// the sub-element to that struct field. +// +// * If the XML element contains a sub-element whose name matches a +// field without any mode flags (",attr", ",chardata", etc), Unmarshal +// maps the sub-element to that struct field. +// +// * If the XML element contains a sub-element that hasn't matched any +// of the above rules and the struct has a field with tag ",any", +// unmarshal maps the sub-element to that struct field. +// +// * An anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// * A struct field with tag "-" is never unmarshalled into. +// +// Unmarshal maps an XML element to a string or []byte by saving the +// concatenation of that element's character data in the string or +// []byte. The saved []byte is never nil. +// +// Unmarshal maps an attribute value to a string or []byte by saving +// the value in the string or slice. +// +// Unmarshal maps an XML element to a slice by extending the length of +// the slice and mapping the element to the newly created value. +// +// Unmarshal maps an XML element or attribute value to a bool by +// setting it to the boolean value represented by the string. +// +// Unmarshal maps an XML element or attribute value to an integer or +// floating-point field by setting the field to the result of +// interpreting the string value in decimal. There is no check for +// overflow. +// +// Unmarshal maps an XML element to an xml.Name by recording the +// element name. +// +// Unmarshal maps an XML element to a pointer by setting the pointer +// to a freshly allocated value and then mapping the element to that value. +// +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +// Decode works like xml.Unmarshal, except it reads the decoder +// stream to find the start element. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeElement(v, nil) +} + +// DecodeElement works like xml.Unmarshal except that it takes +// a pointer to the start XML element to decode into v. +// It is useful when a client reads some raw XML tokens itself +// but also wants to defer to Unmarshal for some elements. +func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer passed to Unmarshal") + } + return d.unmarshal(val.Elem(), start) +} + +// An UnmarshalError represents an error in the unmarshalling process. 
+type UnmarshalError string + +func (e UnmarshalError) Error() string { return string(e) } + +// Unmarshaler is the interface implemented by objects that can unmarshal +// an XML element description of themselves. +// +// UnmarshalXML decodes a single XML element +// beginning with the given start element. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXML must consume exactly one XML element. +// One common implementation strategy is to unmarshal into +// a separate value with a layout matching the expected XML +// using d.DecodeElement, and then to copy the data from +// that value into the receiver. +// Another common strategy is to use d.Token to process the +// XML object one token at a time. +// UnmarshalXML may not use d.RawToken. +type Unmarshaler interface { + UnmarshalXML(d *Decoder, start StartElement) error +} + +// UnmarshalerAttr is the interface implemented by objects that can unmarshal +// an XML attribute description of themselves. +// +// UnmarshalXMLAttr decodes a single XML attribute. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type UnmarshalerAttr interface { + UnmarshalXMLAttr(attr Attr) error +} + +// receiverType returns the receiver type to use in an expression like "%s.MethodName". +func receiverType(val interface{}) string { + t := reflect.TypeOf(val) + if t.Name() != "" { + return t.String() + } + return "(" + t.String() + ")" +} + +// unmarshalInterface unmarshals a single XML element into val. +// start is the opening tag of the element. +func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { + // Record that decoder must stop at end tag corresponding to start. + p.pushEOF() + + p.unmarshalDepth++ + err := val.UnmarshalXML(p, *start) + p.unmarshalDepth-- + if err != nil { + p.popEOF() + return err + } + + if !p.popEOF() { + return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) + } + + return nil +} + +// unmarshalTextInterface unmarshals a single XML element into val. +// The chardata contained in the element (but not its children) +// is passed to the text unmarshaler. +func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { + var buf []byte + depth := 1 + for depth > 0 { + t, err := p.Token() + if err != nil { + return err + } + switch t := t.(type) { + case CharData: + if depth == 1 { + buf = append(buf, t...) + } + case StartElement: + depth++ + case EndElement: + depth-- + } + } + return val.UnmarshalText(buf) +} + +// unmarshalAttr unmarshals a single XML attribute into val. +func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { + return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + } + + // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. 
+ if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + } + + copyValue(val, []byte(attr.Value)) + return nil +} + +var ( + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Unmarshal a single XML element into val. +func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { + // Find start element if we need it. + if start == nil { + for { + tok, err := p.Token() + if err != nil { + return err + } + if t, ok := tok.(StartElement); ok { + start = &t + break + } + } + } + + // Load value from interface, but only if the result will be + // usefully addressable. + if val.Kind() == reflect.Interface && !val.IsNil() { + e := val.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + val = e + } + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { + return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + } + } + + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + } + } + + var ( + data []byte + saveData reflect.Value + comment []byte + saveComment reflect.Value + saveXML reflect.Value + saveXMLIndex int + saveXMLData []byte + saveAny reflect.Value + sv reflect.Value + tinfo *typeInfo + err error + ) + + switch v := val; v.Kind() { + default: + return errors.New("unknown type " + v.Type().String()) + + case reflect.Interface: + // TODO: For now, simply ignore the field. In the near + // future we may choose to unmarshal the start + // element on it, if not nil. + return p.Skip() + + case reflect.Slice: + typ := v.Type() + if typ.Elem().Kind() == reflect.Uint8 { + // []byte + saveData = v + break + } + + // Slice of element values. + // Grow slice. + n := v.Len() + if n >= v.Cap() { + ncap := 2 * n + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(typ, n, ncap) + reflect.Copy(new, v) + v.Set(new) + } + v.SetLen(n + 1) + + // Recur to read element into slice. 
+ if err := p.unmarshal(v.Index(n), start); err != nil { + v.SetLen(n) + return err + } + return nil + + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: + saveData = v + + case reflect.Struct: + typ := v.Type() + if typ == nameType { + v.Set(reflect.ValueOf(start.Name)) + break + } + + sv = v + tinfo, err = getTypeInfo(typ) + if err != nil { + return err + } + + // Validate and assign element name. + if tinfo.xmlname != nil { + finfo := tinfo.xmlname + if finfo.name != "" && finfo.name != start.Name.Local { + return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") + } + if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " + if start.Name.Space == "" { + e += "no name space" + } else { + e += start.Name.Space + } + return UnmarshalError(e) + } + fv := finfo.value(sv) + if _, ok := fv.Interface().(Name); ok { + fv.Set(reflect.ValueOf(start.Name)) + } + } + + // Assign attributes. + // Also, determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv) + // Look for attribute. + for _, a := range start.Attr { + if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { + if err := p.unmarshalAttr(strv, a); err != nil { + return err + } + break + } + } + + case fCharData: + if !saveData.IsValid() { + saveData = finfo.value(sv) + } + + case fComment: + if !saveComment.IsValid() { + saveComment = finfo.value(sv) + } + + case fAny, fAny | fElement: + if !saveAny.IsValid() { + saveAny = finfo.value(sv) + } + + case fInnerXml: + if !saveXML.IsValid() { + saveXML = finfo.value(sv) + if p.saved == nil { + saveXMLIndex = 0 + p.saved = new(bytes.Buffer) + } else { + saveXMLIndex = p.savedOffset() + } + } + } + } + } + + // Find end element. + // Process sub-elements along the way. +Loop: + for { + var savedOffset int + if saveXML.IsValid() { + savedOffset = p.savedOffset() + } + tok, err := p.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case StartElement: + consumed := false + if sv.IsValid() { + consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + if err != nil { + return err + } + if !consumed && saveAny.IsValid() { + consumed = true + if err := p.unmarshal(saveAny, &t); err != nil { + return err + } + } + } + if !consumed { + if err := p.Skip(); err != nil { + return err + } + } + + case EndElement: + if saveXML.IsValid() { + saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + if saveXMLIndex == 0 { + p.saved = nil + } + } + break Loop + + case CharData: + if saveData.IsValid() { + data = append(data, t...) + } + + case Comment: + if saveComment.IsValid() { + comment = append(comment, t...) 
+ } + } + } + + if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { + if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + + if saveData.IsValid() && saveData.CanAddr() { + pv := saveData.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + } + + if err := copyValue(saveData, data); err != nil { + return err + } + + switch t := saveComment; t.Kind() { + case reflect.String: + t.SetString(string(comment)) + case reflect.Slice: + t.Set(reflect.ValueOf(comment)) + } + + switch t := saveXML; t.Kind() { + case reflect.String: + t.SetString(string(saveXMLData)) + case reflect.Slice: + t.Set(reflect.ValueOf(saveXMLData)) + } + + return nil +} + +func copyValue(dst reflect.Value, src []byte) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Ptr { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + // Save accumulated data. + switch dst.Kind() { + case reflect.Invalid: + // Probably a comment. + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetUint(utmp) + case reflect.Float32, reflect.Float64: + ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if err != nil { + return err + } + dst.SetFloat(ftmp) + case reflect.Bool: + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + case reflect.Slice: + if len(src) == 0 { + // non-nil to flag presence + src = []byte{} + } + dst.SetBytes(src) + } + return nil +} + +// unmarshalPath walks down an XML structure looking for wanted +// paths, and calls unmarshal on them. +// The consumed result tells whether XML elements have been consumed +// from the Decoder until start's matching end element, or if it's +// still untouched because start is uninteresting for sv's fields. +func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { + recurse := false +Loop: + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + continue + } + for j := range parents { + if parents[j] != finfo.parents[j] { + continue Loop + } + } + if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { + // It's a perfect match, unmarshal the field. + return true, p.unmarshal(finfo.value(sv), start) + } + if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { + // It's a prefix for the field. Break and recurse + // since it's not ok for one field path to be itself + // the prefix for another field path. + recurse = true + + // We can reuse the same slice as long as we + // don't try to append to it. 
+ parents = finfo.parents[:len(parents)+1] + break + } + } + if !recurse { + // We have no business with this element. + return false, nil + } + // The element is not a perfect match for any field, but one + // or more fields have the path to this element as a parent + // prefix. Recurse and attempt to match these. + for { + var tok Token + tok, err = p.Token() + if err != nil { + return true, err + } + switch t := tok.(type) { + case StartElement: + consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + if err != nil { + return true, err + } + if !consumed2 { + if err := p.Skip(); err != nil { + return true, err + } + } + case EndElement: + return true, nil + } + } +} + +// Skip reads tokens until it has consumed the end element +// matching the most recent start element already consumed. +// It recurs if it encounters a start element, so it can be used to +// skip nested structures. +// It returns nil if it finds an end element matching the start +// element; otherwise it returns an error describing the problem. +func (d *Decoder) Skip() error { + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok.(type) { + case StartElement: + if err := d.Skip(); err != nil { + return err + } + case EndElement: + return nil + } + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read_test.go b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go new file mode 100644 index 000000000..02f1e10c3 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go @@ -0,0 +1,744 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + "testing" + "time" +) + +// Stripped down Atom feed data structures. + +func TestUnmarshalFeed(t *testing.T) { + var f Feed + if err := Unmarshal([]byte(atomFeedString), &f); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(f, atomFeed) { + t.Fatalf("have %#v\nwant %#v", f, atomFeed) + } +} + +// hget http://codereview.appspot.com/rss/mine/rsc +const atomFeedString = ` + +Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub +2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can&#39;t quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed&#39;s actual URL in +the link rel=&quot;self&quot;, but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) 
+ +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +rietveld: correct tab handling +2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn&#39;t know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + + ` + +type Feed struct { + XMLName Name `xml:"http://www.w3.org/2005/Atom feed"` + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated,attr"` + Author Person `xml:"author"` + Entry []Entry `xml:"entry"` +} + +type Entry struct { + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated"` + Author Person `xml:"author"` + Summary Text `xml:"summary"` +} + +type Link struct { + Rel string `xml:"rel,attr,omitempty"` + Href string `xml:"href,attr"` +} + +type Person struct { + Name string `xml:"name"` + URI string `xml:"uri"` + Email string `xml:"email"` + InnerXML string `xml:",innerxml"` +} + +type Text struct { + Type string `xml:"type,attr,omitempty"` + Body string `xml:",chardata"` +} + +var atomFeed = Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Code Review - My issues", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/"}, + {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"}, + }, + Id: "http://codereview.appspot.com/", + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "rietveld<>", + InnerXML: "rietveld<>", + }, + Entry: []Entry{ + { + Title: "rietveld: an attempt at pubsubhubbub\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/126085"}, + }, + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a", + Summary: Text{ + Type: "html", + Body: ` + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a <link rel="hub" href="hub-server"> tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can't quite get the server to work, but I think the bug +is not in my code. 
I think that the server expects to be +able to grab the feed and see the feed's actual URL in +the link rel="self", but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +`, + }, + }, + { + Title: "rietveld: correct tab handling\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/124106"}, + }, + Updated: ParseTime("2009-10-03T23:02:17+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a", + Summary: Text{ + Type: "html", + Body: ` + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn't know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + +`, + }, + }, + }, +} + +const pathTestString = ` + + 1 + + + A + + + B + + + C + D + + <_> + E + + + 2 + +` + +type PathTestItem struct { + Value string +} + +type PathTestA struct { + Items []PathTestItem `xml:">Item1"` + Before, After string +} + +type PathTestB struct { + Other []PathTestItem `xml:"Items>Item1"` + Before, After string +} + +type PathTestC struct { + Values1 []string `xml:"Items>Item1>Value"` + Values2 []string `xml:"Items>Item2>Value"` + Before, After string +} + +type PathTestSet struct { + Item1 []PathTestItem +} + +type PathTestD struct { + Other PathTestSet `xml:"Items"` + Before, After string +} + +type PathTestE struct { + Underline string `xml:"Items>_>Value"` + Before, After string +} + +var pathTests = []interface{}{ + &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"}, + &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"}, + &PathTestE{Underline: "E", Before: "1", After: "2"}, +} + +func TestUnmarshalPaths(t *testing.T) { + for _, pt := range pathTests { + v := reflect.New(reflect.TypeOf(pt).Elem()).Interface() + if err := Unmarshal([]byte(pathTestString), v); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(v, pt) { + t.Fatalf("have %#v\nwant %#v", v, pt) + } + } +} + +type BadPathTestA struct { + First string `xml:"items>item1"` + Other string `xml:"items>item2"` + Second string `xml:"items"` +} + +type BadPathTestB struct { + Other string `xml:"items>item2>value"` + First string `xml:"items>item1"` + Second string `xml:"items>item1>value"` +} + +type BadPathTestC struct { + First string + Second string `xml:"First"` +} + +type BadPathTestD struct { 
+ BadPathEmbeddedA + BadPathEmbeddedB +} + +type BadPathEmbeddedA struct { + First string +} + +type BadPathEmbeddedB struct { + Second string `xml:"First"` +} + +var badPathTests = []struct { + v, e interface{} +}{ + {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}}, + {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}}, + {&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}}, + {&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}}, +} + +func TestUnmarshalBadPaths(t *testing.T) { + for _, tt := range badPathTests { + err := Unmarshal([]byte(pathTestString), tt.v) + if !reflect.DeepEqual(err, tt.e) { + t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e) + } + } +} + +const OK = "OK" +const withoutNameTypeData = ` + +` + +type TestThree struct { + XMLName Name `xml:"Test3"` + Attr string `xml:",attr"` +} + +func TestUnmarshalWithoutNameType(t *testing.T) { + var x TestThree + if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if x.Attr != OK { + t.Fatalf("have %v\nwant %v", x.Attr, OK) + } +} + +func TestUnmarshalAttr(t *testing.T) { + type ParamVal struct { + Int int `xml:"int,attr"` + } + + type ParamPtr struct { + Int *int `xml:"int,attr"` + } + + type ParamStringPtr struct { + Int *string `xml:"int,attr"` + } + + x := []byte(``) + + p1 := &ParamPtr{} + if err := Unmarshal(x, p1); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p1.Int == nil { + t.Fatalf("Unmarshal failed in to *int field") + } else if *p1.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1) + } + + p2 := &ParamVal{} + if err := Unmarshal(x, p2); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p2.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1) + } + + p3 := &ParamStringPtr{} + if err := Unmarshal(x, p3); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p3.Int == nil { + t.Fatalf("Unmarshal failed in to *string field") + } else if *p3.Int != "1" { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1) + } +} + +type Tables struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table"` + FTable string `xml:"http://www.w3schools.com/furniture table"` +} + +var tables = []struct { + xml string + tab Tables + ns string +}{ + { + xml: `` + + `hello
    ` + + `world
    ` + + `
    `, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `world
    ` + + `hello
    ` + + `
    `, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `world` + + `hello` + + ``, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `bogus
    ` + + `
    `, + tab: Tables{}, + }, + { + xml: `` + + `only
    ` + + `
    `, + tab: Tables{HTable: "only"}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: `` + + `only
    ` + + `
    `, + tab: Tables{FTable: "only"}, + ns: "http://www.w3schools.com/furniture", + }, + { + xml: `` + + `only
    ` + + `
    `, + tab: Tables{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNS(t *testing.T) { + for i, tt := range tables { + var dst Tables + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestRoundTrip(t *testing.T) { + // From issue 7535 + const s = `` + in := bytes.NewBufferString(s) + for i := 0; i < 10; i++ { + out := &bytes.Buffer{} + d := NewDecoder(in) + e := NewEncoder(out) + + for { + t, err := d.Token() + if err == io.EOF { + break + } + if err != nil { + fmt.Println("failed:", err) + return + } + e.EncodeToken(t) + } + e.Flush() + in = out + } + if got := in.String(); got != s { + t.Errorf("have: %q\nwant: %q\n", got, s) + } +} + +func TestMarshalNS(t *testing.T) { + dst := Tables{"hello", "world"} + data, err := Marshal(&dst) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `hello
    world
    ` + str := string(data) + if str != want { + t.Errorf("have: %q\nwant: %q\n", str, want) + } +} + +type TableAttrs struct { + TAttr TAttr +} + +type TAttr struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"` + FTable string `xml:"http://www.w3schools.com/furniture table,attr"` + Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"` + Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"` + Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"` + Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"` + Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"` +} + +var tableAttrs = []struct { + xml string + tab TableAttrs + ns string +}{ + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + ns: "http://www.w3schools.com/furniture", + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: ``, + tab: TableAttrs{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNSAttr(t *testing.T) { + for i, tt := range tableAttrs { + var dst TableAttrs + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestMarshalNSAttr(t *testing.T) { + src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}} + data, err := Marshal(&src) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `` + str := string(data) + if str != want { + t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want) + } + + var dst TableAttrs + if err := Unmarshal(data, &dst); err != nil { + t.Errorf("Unmarshal: %v", err) + } + + if dst != src { + t.Errorf("Unmarshal = %q, want %q", dst, src) + } +} + +type MyCharData struct { + body string +} + +func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error { + for { + t, err := d.Token() + if err == io.EOF { // found end of element + break + } + if err != nil { + return err + } + if char, ok := t.(CharData); ok { + m.body += string(char) + } + } + return nil +} + +var _ Unmarshaler = (*MyCharData)(nil) + +func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error { + panic("must not call") +} + +type MyAttr struct { + attr string +} + +func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error { + m.attr = attr.Value + return nil +} + +var _ UnmarshalerAttr = (*MyAttr)(nil) + +type MyStruct struct { + Data *MyCharData + Attr *MyAttr `xml:",attr"` + + Data2 MyCharData + Attr2 MyAttr `xml:",attr"` +} + +func TestUnmarshaler(t *testing.T) { + xml := ` + + hello world + howdy world + + ` + + var m MyStruct + if err := 
Unmarshal([]byte(xml), &m); err != nil { + t.Fatal(err) + } + + if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" { + t.Errorf("m=%#+v\n", m) + } +} + +type Pea struct { + Cotelydon string +} + +type Pod struct { + Pea interface{} `xml:"Pea"` +} + +// https://golang.org/issue/6836 +func TestUnmarshalIntoInterface(t *testing.T) { + pod := new(Pod) + pod.Pea = new(Pea) + xml := `Green stuff` + err := Unmarshal([]byte(xml), pod) + if err != nil { + t.Fatalf("failed to unmarshal %q: %v", xml, err) + } + pea, ok := pod.Pea.(*Pea) + if !ok { + t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea) + } + have, want := pea.Cotelydon, "Green stuff" + if have != want { + t.Errorf("failed to unmarshal into interface, have %q want %q", have, want) + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go new file mode 100644 index 000000000..fdde288bc --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go @@ -0,0 +1,371 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// typeInfo holds details for the xml representation of a type. +type typeInfo struct { + xmlname *fieldInfo + fields []fieldInfo +} + +// fieldInfo holds details for the xml representation of a single field. +type fieldInfo struct { + idx []int + name string + xmlns string + flags fieldFlags + parents []string +} + +type fieldFlags int + +const ( + fElement fieldFlags = 1 << iota + fAttr + fCharData + fInnerXml + fComment + fAny + + fOmitEmpty + + fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny +) + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +var nameType = reflect.TypeOf(Name{}) + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. +func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct && typ != nameType { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + if tinfo.xmlname == nil { + tinfo.xmlname = inner.xmlname + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) + if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + if f.Name == "XMLName" { + tinfo.xmlname = finfo + continue + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. 
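// For example (an illustrative sketch, not part of the upstream source; the
// namespace URL and field name are invented), a struct field declared as
//
//	Value string `xml:"http://example.com/ns Items>Item1>Value,omitempty"`
//
// is parsed by structFieldInfo into a fieldInfo with
//
//	xmlns   = "http://example.com/ns"
//	name    = "Value"
//	parents = []string{"Items", "Item1"}
//	flags   = fElement | fOmitEmpty
//
// i.e. the leading space-separated token becomes the namespace, the ">"-separated
// path becomes the parent chain plus the element name, and the comma-separated
// suffixes become flags.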
+func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("xml") + if i := strings.Index(tag, " "); i >= 0 { + finfo.xmlns, tag = tag[:i], tag[i+1:] + } + + // Parse flags. + tokens := strings.Split(tag, ",") + if len(tokens) == 1 { + finfo.flags = fElement + } else { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "attr": + finfo.flags |= fAttr + case "chardata": + finfo.flags |= fCharData + case "innerxml": + finfo.flags |= fInnerXml + case "comment": + finfo.flags |= fComment + case "any": + finfo.flags |= fAny + case "omitempty": + finfo.flags |= fOmitEmpty + } + } + + // Validate the flags used. + valid := true + switch mode := finfo.flags & fMode; mode { + case 0: + finfo.flags |= fElement + case fAttr, fCharData, fInnerXml, fComment, fAny: + if f.Name == "XMLName" || tag != "" && mode != fAttr { + valid = false + } + default: + // This will also catch multiple modes in a single field. + valid = false + } + if finfo.flags&fMode == fAny { + finfo.flags |= fElement + } + if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { + valid = false + } + if !valid { + return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + } + + // Use of xmlns without a name is not allowed. + if finfo.xmlns != "" && tag == "" { + return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + + if f.Name == "XMLName" { + // The XMLName field records the XML element name. Don't + // process it as usual because its name should default to + // empty rather than to the field name. + finfo.name = tag + return finfo, nil + } + + if tag == "" { + // If the name part of the tag is completely empty, get + // default from XMLName of underlying struct if feasible, + // or field name otherwise. + if xmlname := lookupXMLName(f.Type); xmlname != nil { + finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name + } else { + finfo.name = f.Name + } + return finfo, nil + } + + if finfo.xmlns == "" && finfo.flags&fAttr == 0 { + // If it's an element no namespace specified, get the default + // from the XMLName of enclosing struct if possible. + if xmlname := lookupXMLName(typ); xmlname != nil { + finfo.xmlns = xmlname.xmlns + } + } + + // Prepare field name and parents. + parents := strings.Split(tag, ">") + if parents[0] == "" { + parents[0] = f.Name + } + if parents[len(parents)-1] == "" { + return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) + } + finfo.name = parents[len(parents)-1] + if len(parents) > 1 { + if (finfo.flags & fElement) == 0 { + return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) + } + finfo.parents = parents[:len(parents)-1] + } + + // If the field type has an XMLName field, the names must match + // so that the behavior of both marshalling and unmarshalling + // is straightforward and unambiguous. + if finfo.flags&fElement != 0 { + ftyp := f.Type + xmlname := lookupXMLName(ftyp) + if xmlname != nil && xmlname.name != finfo.name { + return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", + finfo.name, typ, f.Name, xmlname.name, ftyp) + } + } + return finfo, nil +} + +// lookupXMLName returns the fieldInfo for typ's XMLName field +// in case it exists and has a valid xml field tag, otherwise +// it returns nil. 
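// As an illustrative note (not part of the upstream source), for a type such as
// the Feed struct in read_test.go earlier in this diff,
//
//	type Feed struct {
//		XMLName Name `xml:"http://www.w3.org/2005/Atom feed"`
//	}
//
// lookupXMLName(reflect.TypeOf(Feed{})) returns a fieldInfo with
// xmlns == "http://www.w3.org/2005/Atom" and name == "feed", while a struct
// without a tagged XMLName field yields nil.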
+func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() != reflect.Struct { + return nil + } + for i, n := 0, typ.NumField(); i < n; i++ { + f := typ.Field(i) + if f.Name != "XMLName" { + continue + } + finfo, err := structFieldInfo(typ, &f) + if finfo.name != "" && err == nil { + return finfo + } + // Also consider errors as a non-existent field tag + // and let getTypeInfo itself report the error. + break + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int +Loop: + // First, figure all conflicts. Most working code will have none. + for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if oldf.flags&fMode != newf.flags&fMode { + continue + } + if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { + continue + } + minl := min(len(newf.parents), len(oldf.parents)) + for p := 0; p < minl; p++ { + if oldf.parents[p] != newf.parents[p] { + continue Loop + } + } + if len(oldf.parents) > len(newf.parents) { + if oldf.parents[len(newf.parents)] == newf.name { + conflicts = append(conflicts, i) + } + } else if len(oldf.parents) < len(newf.parents) { + if newf.parents[len(oldf.parents)] == oldf.name { + conflicts = append(conflicts, i) + } + } else { + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + } + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. + for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, if any of them is at the same depth level, it's an error. + for _, i := range conflicts { + oldf := &tinfo.fields[i] + if len(oldf.idx) == len(newf.idx) { + f1 := typ.FieldByIndex(oldf.idx) + f2 := typ.FieldByIndex(newf.idx) + return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. + for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// A TagPathError represents an error in the unmarshalling process +// caused by the use of field tags with conflicting paths. +type TagPathError struct { + Struct reflect.Type + Field1, Tag1 string + Field2, Tag2 string +} + +func (e *TagPathError) Error() string { + return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) +} + +// value returns v's field value corresponding to finfo. 
+// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/xml.go b/vendor/golang.org/x/net/webdav/internal/xml/xml.go new file mode 100644 index 000000000..5b79cbecb --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/xml.go @@ -0,0 +1,1998 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xml implements a simple XML 1.0 parser that +// understands XML name spaces. +package xml + +// References: +// Annotated XML spec: http://www.xml.com/axml/testaxml.htm +// XML name spaces: http://www.w3.org/TR/REC-xml-names/ + +// TODO(rsc): +// Test error handling. + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A SyntaxError represents a syntax error in the XML input stream. +type SyntaxError struct { + Msg string + Line int +} + +func (e *SyntaxError) Error() string { + return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg +} + +// A Name represents an XML name (Local) annotated with a name space +// identifier (Space). In tokens returned by Decoder.Token, the Space +// identifier is given as a canonical URL, not the short prefix used in +// the document being parsed. +// +// As a special case, XML namespace declarations will use the literal +// string "xmlns" for the Space field instead of the fully resolved URL. +// See Encoder.EncodeToken for more information on namespace encoding +// behaviour. +type Name struct { + Space, Local string +} + +// isNamespace reports whether the name is a namespace-defining name. +func (name Name) isNamespace() bool { + return name.Local == "xmlns" || name.Space == "xmlns" +} + +// An Attr represents an attribute in an XML element (Name=Value). +type Attr struct { + Name Name + Value string +} + +// A Token is an interface holding one of the token types: +// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. +type Token interface{} + +// A StartElement represents an XML start element. +type StartElement struct { + Name Name + Attr []Attr +} + +func (e StartElement) Copy() StartElement { + attrs := make([]Attr, len(e.Attr)) + copy(attrs, e.Attr) + e.Attr = attrs + return e +} + +// End returns the corresponding XML end element. +func (e StartElement) End() EndElement { + return EndElement{e.Name} +} + +// setDefaultNamespace sets the namespace of the element +// as the default for all elements contained within it. +func (e *StartElement) setDefaultNamespace() { + if e.Name.Space == "" { + // If there's no namespace on the element, don't + // set the default. Strictly speaking this might be wrong, as + // we can't tell if the element had no namespace set + // or was just using the default namespace. + return + } + // Don't add a default name space if there's already one set. 
+ for _, attr := range e.Attr { + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + return + } + } + e.Attr = append(e.Attr, Attr{ + Name: Name{ + Local: "xmlns", + }, + Value: e.Name.Space, + }) +} + +// An EndElement represents an XML end element. +type EndElement struct { + Name Name +} + +// A CharData represents XML character data (raw text), +// in which XML escape sequences have been replaced by +// the characters they represent. +type CharData []byte + +func makeCopy(b []byte) []byte { + b1 := make([]byte, len(b)) + copy(b1, b) + return b1 +} + +func (c CharData) Copy() CharData { return CharData(makeCopy(c)) } + +// A Comment represents an XML comment of the form . +// The bytes do not include the comment markers. +type Comment []byte + +func (c Comment) Copy() Comment { return Comment(makeCopy(c)) } + +// A ProcInst represents an XML processing instruction of the form +type ProcInst struct { + Target string + Inst []byte +} + +func (p ProcInst) Copy() ProcInst { + p.Inst = makeCopy(p.Inst) + return p +} + +// A Directive represents an XML directive of the form . +// The bytes do not include the markers. +type Directive []byte + +func (d Directive) Copy() Directive { return Directive(makeCopy(d)) } + +// CopyToken returns a copy of a Token. +func CopyToken(t Token) Token { + switch v := t.(type) { + case CharData: + return v.Copy() + case Comment: + return v.Copy() + case Directive: + return v.Copy() + case ProcInst: + return v.Copy() + case StartElement: + return v.Copy() + } + return t +} + +// A Decoder represents an XML parser reading a particular input stream. +// The parser assumes that its input is encoded in UTF-8. +type Decoder struct { + // Strict defaults to true, enforcing the requirements + // of the XML specification. + // If set to false, the parser allows input containing common + // mistakes: + // * If an element is missing an end tag, the parser invents + // end tags as necessary to keep the return values from Token + // properly balanced. + // * In attribute values and character data, unknown or malformed + // character entities (sequences beginning with &) are left alone. + // + // Setting: + // + // d.Strict = false; + // d.AutoClose = HTMLAutoClose; + // d.Entity = HTMLEntity + // + // creates a parser that can handle typical HTML. + // + // Strict mode does not enforce the requirements of the XML name spaces TR. + // In particular it does not reject name space tags using undefined prefixes. + // Such tags are recorded with the unknown prefix as the name space URL. + Strict bool + + // When Strict == false, AutoClose indicates a set of elements to + // consider closed immediately after they are opened, regardless + // of whether an end element is present. + AutoClose []string + + // Entity can be used to map non-standard entity names to string replacements. + // The parser behaves as if these standard mappings are present in the map, + // regardless of the actual map content: + // + // "lt": "<", + // "gt": ">", + // "amp": "&", + // "apos": "'", + // "quot": `"`, + Entity map[string]string + + // CharsetReader, if non-nil, defines a function to generate + // charset-conversion readers, converting from the provided + // non-UTF-8 charset into UTF-8. If CharsetReader is nil or + // returns an error, parsing stops with an error. One of the + // the CharsetReader's result values must be non-nil. 
+ CharsetReader func(charset string, input io.Reader) (io.Reader, error) + + // DefaultSpace sets the default name space used for unadorned tags, + // as if the entire XML stream were wrapped in an element containing + // the attribute xmlns="DefaultSpace". + DefaultSpace string + + r io.ByteReader + buf bytes.Buffer + saved *bytes.Buffer + stk *stack + free *stack + needClose bool + toClose Name + nextToken Token + nextByte int + ns map[string]string + err error + line int + offset int64 + unmarshalDepth int +} + +// NewDecoder creates a new XML parser reading from r. +// If r does not implement io.ByteReader, NewDecoder will +// do its own buffering. +func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + ns: make(map[string]string), + nextByte: -1, + line: 1, + Strict: true, + } + d.switchToReader(r) + return d +} + +// Token returns the next XML token in the input stream. +// At the end of the input stream, Token returns nil, io.EOF. +// +// Slices of bytes in the returned token data refer to the +// parser's internal buffer and remain valid only until the next +// call to Token. To acquire a copy of the bytes, call CopyToken +// or the token's Copy method. +// +// Token expands self-closing elements such as
    +// into separate start and end elements returned by successive calls. +// +// Token guarantees that the StartElement and EndElement +// tokens it returns are properly nested and matched: +// if Token encounters an unexpected end element, +// it will return an error. +// +// Token implements XML name spaces as described by +// http://www.w3.org/TR/REC-xml-names/. Each of the +// Name structures contained in the Token has the Space +// set to the URL identifying its name space when known. +// If Token encounters an unrecognized name space prefix, +// it uses the prefix as the Space rather than report an error. +func (d *Decoder) Token() (t Token, err error) { + if d.stk != nil && d.stk.kind == stkEOF { + err = io.EOF + return + } + if d.nextToken != nil { + t = d.nextToken + d.nextToken = nil + } else if t, err = d.rawToken(); err != nil { + return + } + + if !d.Strict { + if t1, ok := d.autoClose(t); ok { + d.nextToken = t + t = t1 + } + } + switch t1 := t.(type) { + case StartElement: + // In XML name spaces, the translations listed in the + // attributes apply to the element name and + // to the other attribute names, so process + // the translations first. + for _, a := range t1.Attr { + if a.Name.Space == "xmlns" { + v, ok := d.ns[a.Name.Local] + d.pushNs(a.Name.Local, v, ok) + d.ns[a.Name.Local] = a.Value + } + if a.Name.Space == "" && a.Name.Local == "xmlns" { + // Default space for untagged names + v, ok := d.ns[""] + d.pushNs("", v, ok) + d.ns[""] = a.Value + } + } + + d.translate(&t1.Name, true) + for i := range t1.Attr { + d.translate(&t1.Attr[i].Name, false) + } + d.pushElement(t1.Name) + t = t1 + + case EndElement: + d.translate(&t1.Name, true) + if !d.popElement(&t1) { + return nil, d.err + } + t = t1 + } + return +} + +const xmlURL = "http://www.w3.org/XML/1998/namespace" + +// Apply name space translation to name n. +// The default name space (for Space=="") +// applies only to element names, not to attribute names. +func (d *Decoder) translate(n *Name, isElementName bool) { + switch { + case n.Space == "xmlns": + return + case n.Space == "" && !isElementName: + return + case n.Space == "xml": + n.Space = xmlURL + case n.Space == "" && n.Local == "xmlns": + return + } + if v, ok := d.ns[n.Space]; ok { + n.Space = v + } else if n.Space == "" { + n.Space = d.DefaultSpace + } +} + +func (d *Decoder) switchToReader(r io.Reader) { + // Get efficient byte at a time reader. + // Assume that if reader has its own + // ReadByte, it's efficient enough. + // Otherwise, use bufio. + if rb, ok := r.(io.ByteReader); ok { + d.r = rb + } else { + d.r = bufio.NewReader(r) + } +} + +// Parsing state - stack holds old name space translations +// and the current set of open elements. The translations to pop when +// ending a given tag are *below* it on the stack, which is +// more work but forced on us by XML. +type stack struct { + next *stack + kind int + name Name + ok bool +} + +const ( + stkStart = iota + stkNs + stkEOF +) + +func (d *Decoder) push(kind int) *stack { + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.next = d.stk + s.kind = kind + d.stk = s + return s +} + +func (d *Decoder) pop() *stack { + s := d.stk + if s != nil { + d.stk = s.next + s.next = d.free + d.free = s + } + return s +} + +// Record that after the current element is finished +// (that element is already pushed on the stack) +// Token should return EOF until popEOF is called. +func (d *Decoder) pushEOF() { + // Walk down stack to find Start. 
+ // It might not be the top, because there might be stkNs + // entries above it. + start := d.stk + for start.kind != stkStart { + start = start.next + } + // The stkNs entries below a start are associated with that + // element too; skip over them. + for start.next != nil && start.next.kind == stkNs { + start = start.next + } + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.kind = stkEOF + s.next = start.next + start.next = s +} + +// Undo a pushEOF. +// The element must have been finished, so the EOF should be at the top of the stack. +func (d *Decoder) popEOF() bool { + if d.stk == nil || d.stk.kind != stkEOF { + return false + } + d.pop() + return true +} + +// Record that we are starting an element with the given name. +func (d *Decoder) pushElement(name Name) { + s := d.push(stkStart) + s.name = name +} + +// Record that we are changing the value of ns[local]. +// The old value is url, ok. +func (d *Decoder) pushNs(local string, url string, ok bool) { + s := d.push(stkNs) + s.name.Local = local + s.name.Space = url + s.ok = ok +} + +// Creates a SyntaxError with the current line number. +func (d *Decoder) syntaxError(msg string) error { + return &SyntaxError{Msg: msg, Line: d.line} +} + +// Record that we are ending an element with the given name. +// The name must match the record at the top of the stack, +// which must be a pushElement record. +// After popping the element, apply any undo records from +// the stack to restore the name translations that existed +// before we saw this element. +func (d *Decoder) popElement(t *EndElement) bool { + s := d.pop() + name := t.Name + switch { + case s == nil || s.kind != stkStart: + d.err = d.syntaxError("unexpected end element ") + return false + case s.name.Local != name.Local: + if !d.Strict { + d.needClose = true + d.toClose = t.Name + t.Name = s.name + return true + } + d.err = d.syntaxError("element <" + s.name.Local + "> closed by ") + return false + case s.name.Space != name.Space: + d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space + + "closed by in space " + name.Space) + return false + } + + // Pop stack until a Start or EOF is on the top, undoing the + // translations that were associated with the element we just closed. + for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF { + s := d.pop() + if s.ok { + d.ns[s.name.Local] = s.name.Space + } else { + delete(d.ns, s.name.Local) + } + } + + return true +} + +// If the top element on the stack is autoclosing and +// t is not the end tag, invent the end tag. +func (d *Decoder) autoClose(t Token) (Token, bool) { + if d.stk == nil || d.stk.kind != stkStart { + return nil, false + } + name := strings.ToLower(d.stk.name.Local) + for _, s := range d.AutoClose { + if strings.ToLower(s) == name { + // This one should be auto closed if t doesn't close it. + et, ok := t.(EndElement) + if !ok || et.Name.Local != name { + return EndElement{d.stk.name}, true + } + break + } + } + return nil, false +} + +var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method") + +// RawToken is like Token but does not verify that +// start and end elements match and does not translate +// name space prefixes to their corresponding URLs. 
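// As an illustrative sketch (not part of the upstream source; the element and
// namespace are invented), given the input
//
//	<x:a xmlns:x="urn:example"></x:a>
//
// RawToken returns StartElement{Name{"x", "a"}, ...} and EndElement{Name{"x", "a"}},
// leaving the prefix untouched, whereas Token resolves the prefix and returns
// StartElement{Name{"urn:example", "a"}, ...} followed by the matching
// EndElement. The rawTokens and cookedTokens test fixtures further down in this
// diff show the same contrast.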
+func (d *Decoder) RawToken() (Token, error) { + if d.unmarshalDepth > 0 { + return nil, errRawToken + } + return d.rawToken() +} + +func (d *Decoder) rawToken() (Token, error) { + if d.err != nil { + return nil, d.err + } + if d.needClose { + // The last element we read was self-closing and + // we returned just the StartElement half. + // Return the EndElement half now. + d.needClose = false + return EndElement{d.toClose}, nil + } + + b, ok := d.getc() + if !ok { + return nil, d.err + } + + if b != '<' { + // Text section. + d.ungetc(b) + data := d.text(-1, false) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + switch b { + case '/': + // ' { + d.err = d.syntaxError("invalid characters between ") + return nil, d.err + } + return EndElement{name}, nil + + case '?': + // ' { + break + } + b0 = b + } + data := d.buf.Bytes() + data = data[0 : len(data)-2] // chop ?> + + if target == "xml" { + content := string(data) + ver := procInst("version", content) + if ver != "" && ver != "1.0" { + d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) + return nil, d.err + } + enc := procInst("encoding", content) + if enc != "" && enc != "utf-8" && enc != "UTF-8" { + if d.CharsetReader == nil { + d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) + return nil, d.err + } + newr, err := d.CharsetReader(enc, d.r.(io.Reader)) + if err != nil { + d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err) + return nil, d.err + } + if newr == nil { + panic("CharsetReader returned a nil Reader for charset " + enc) + } + d.switchToReader(newr) + } + } + return ProcInst{target, data}, nil + + case '!': + // ' { + break + } + b0, b1 = b1, b + } + data := d.buf.Bytes() + data = data[0 : len(data)-3] // chop --> + return Comment(data), nil + + case '[': // . + data := d.text(-1, true) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + // Probably a directive: , , etc. + // We don't care, but accumulate for caller. Quoted angle + // brackets do not count for nesting. 
+ d.buf.Reset() + d.buf.WriteByte(b) + inquote := uint8(0) + depth := 0 + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if inquote == 0 && b == '>' && depth == 0 { + break + } + HandleB: + d.buf.WriteByte(b) + switch { + case b == inquote: + inquote = 0 + + case inquote != 0: + // in quotes, no special action + + case b == '\'' || b == '"': + inquote = b + + case b == '>' && inquote == 0: + depth-- + + case b == '<' && inquote == 0: + // Look for ` + +var testEntity = map[string]string{"何": "What", "is-it": "is it?"} + +var rawTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"", "hello"}}, + CharData("\n "), + StartElement{Name{"", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"", "query"}}, + CharData("\n "), + StartElement{Name{"", "goodbye"}, []Attr{}}, + EndElement{Name{"", "goodbye"}}, + CharData("\n "), + StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"", "inner"}, []Attr{}}, + EndElement{Name{"", "inner"}}, + CharData("\n "), + EndElement{Name{"", "outer"}}, + CharData("\n "), + StartElement{Name{"tag", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"tag", "name"}}, + CharData("\n"), + EndElement{Name{"", "body"}}, + Comment(" missing final newline "), +} + +var cookedTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"ns2", "hello"}}, + CharData("\n "), + StartElement{Name{"ns2", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"ns2", "query"}}, + CharData("\n "), + StartElement{Name{"ns2", "goodbye"}, []Attr{}}, + EndElement{Name{"ns2", "goodbye"}}, + CharData("\n "), + StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"ns2", "inner"}, []Attr{}}, + EndElement{Name{"ns2", "inner"}}, + CharData("\n "), + EndElement{Name{"ns2", "outer"}}, + CharData("\n "), + StartElement{Name{"ns3", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"ns3", "name"}}, + CharData("\n"), + EndElement{Name{"ns2", "body"}}, + Comment(" missing final newline "), +} + +const testInputAltEncoding = ` + +VALUE` + +var rawTokensAltEncoding = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("value"), + EndElement{Name{"", "tag"}}, +} + +var xmlInput = []string{ + // 
unexpected EOF cases + "<", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "", + "", + " c;", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "cdata]]>", +} + +func TestRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + testRawToken(t, d, testInput, rawTokens) +} + +const nonStrictInput = ` +non&entity +&unknown;entity +{ +&#zzz; +&なまえ3; +<-gt; +&; +&0a; +` + +var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"} + +var nonStrictTokens = []Token{ + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("non&entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&unknown;entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("{"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&#zzz;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&なまえ3;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("<-gt;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&0a;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), +} + +func TestNonStrictRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(nonStrictInput)) + d.Strict = false + testRawToken(t, d, nonStrictInput, nonStrictTokens) +} + +type downCaser struct { + t *testing.T + r io.ByteReader +} + +func (d *downCaser) ReadByte() (c byte, err error) { + c, err = d.r.ReadByte() + if c >= 'A' && c <= 'Z' { + c += 'a' - 'A' + } + return +} + +func (d *downCaser) Read(p []byte) (int, error) { + d.t.Fatalf("unexpected Read call on downCaser reader") + panic("unreachable") +} + +func TestRawTokenAltEncoding(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) { + if charset != "x-testing-uppercase" { + t.Fatalf("unexpected charset %q", charset) + } + return &downCaser{t, input.(io.ByteReader)}, nil + } + testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding) +} + +func TestRawTokenAltEncodingNoConverter(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + token, err := d.RawToken() + if token == nil { + t.Fatalf("expected a token on first RawToken call") + } + if err != nil { + t.Fatal(err) + } + token, err = d.RawToken() + if token != nil { + t.Errorf("expected a nil token; got %#v", token) + } + if err == nil { + t.Fatalf("expected an error on second RawToken call") + } + const encoding = "x-testing-uppercase" + if !strings.Contains(err.Error(), encoding) { + t.Errorf("expected error to contain %q; got error: %v", + encoding, err) + } +} + +func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) { + lastEnd := int64(0) + for i, want := range rawTokens { + start := d.InputOffset() + have, err := d.RawToken() + end := d.InputOffset() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + var shave, swant string + if _, ok := have.(CharData); ok { + shave = fmt.Sprintf("CharData(%q)", have) + } else { + shave = fmt.Sprintf("%#v", have) + } + if _, ok := want.(CharData); ok { + 
swant = fmt.Sprintf("CharData(%q)", want) + } else { + swant = fmt.Sprintf("%#v", want) + } + t.Errorf("token %d = %s, want %s", i, shave, swant) + } + + // Check that InputOffset returned actual token. + switch { + case start < lastEnd: + t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have) + case start >= end: + // Special case: EndElement can be synthesized. + if start == end && end == lastEnd { + break + } + t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have) + case end > int64(len(raw)): + t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have) + default: + text := raw[start:end] + if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) { + t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have) + } + } + lastEnd = end + } +} + +// Ensure that directives (specifically !DOCTYPE) include the complete +// text of any nested directives, noting that < and > do not change +// nesting depth if they are in single or double quotes. + +var nestedDirectivesInput = ` +]> +">]> +]> +'>]> +]> +'>]> +]> +` + +var nestedDirectivesTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE [">]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestNestedDirectives(t *testing.T) { + d := NewDecoder(strings.NewReader(nestedDirectivesInput)) + + for i, want := range nestedDirectivesTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + + for i, want := range cookedTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestSyntax(t *testing.T) { + for i := range xmlInput { + d := NewDecoder(strings.NewReader(xmlInput[i])) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if _, ok := err.(*SyntaxError); !ok { + t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i]) + } + } +} + +type allScalars struct { + True1 bool + True2 bool + False1 bool + False2 bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint int + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Uintptr uintptr + Float32 float32 + Float64 float64 + String string + PtrString *string +} + +var all = allScalars{ + True1: true, + True2: true, + False1: false, + False2: false, + Int: 1, + Int8: -2, + Int16: 3, + Int32: -4, + Int64: 5, + Uint: 6, + Uint8: 7, + Uint16: 8, + Uint32: 9, + Uint64: 10, + Uintptr: 11, + Float32: 13.0, + Float64: 14.0, + String: "15", + PtrString: &sixteen, +} + +var sixteen = "16" + +const testScalarsInput = ` + true + 1 + false + 0 + 1 + -2 + 3 + -4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12.0 + 13.0 + 14.0 + 15 + 16 +` + +func TestAllScalars(t *testing.T) { + var a allScalars + err := Unmarshal([]byte(testScalarsInput), &a) + + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, all) { + t.Errorf("have %+v want 
%+v", a, all) + } +} + +type item struct { + Field_a string +} + +func TestIssue569(t *testing.T) { + data := `abcd` + var i item + err := Unmarshal([]byte(data), &i) + + if err != nil || i.Field_a != "abcd" { + t.Fatal("Expecting abcd") + } +} + +func TestUnquotedAttrs(t *testing.T) { + data := "" + d := NewDecoder(strings.NewReader(data)) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != "tag" { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != "azAZ09:-_" { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != "attr" { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } +} + +func TestValuelessAttrs(t *testing.T) { + tests := [][3]string{ + {"

    ", "p", "nowrap"}, + {"

    ", "p", "nowrap"}, + {"", "input", "checked"}, + {"", "input", "checked"}, + } + for _, test := range tests { + d := NewDecoder(strings.NewReader(test[0])) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != test[1] { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != test[2] { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != test[2] { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } + } +} + +func TestCopyTokenCharData(t *testing.T) { + data := []byte("same data") + var tok1 Token = CharData(data) + tok2 := CopyToken(tok1) + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) != CharData") + } + data[1] = 'o' + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestCopyTokenStartElement(t *testing.T) { + elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}} + var tok1 Token = elt + tok2 := CopyToken(tok1) + if tok1.(StartElement).Attr[0].Value != "en" { + t.Error("CopyToken overwrote Attr[0]") + } + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(StartElement) != StartElement") + } + tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"} + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestSyntaxErrorLineNum(t *testing.T) { + testInput := "

    Foo

    \n\n

    Bar\n" + d := NewDecoder(strings.NewReader(testInput)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Error("Expected SyntaxError.") + } + if synerr.Line != 3 { + t.Error("SyntaxError didn't have correct line number.") + } +} + +func TestTrailingRawToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.RawToken(); err == nil; _, err = d.RawToken() { + } + if err != io.EOF { + t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err) + } +} + +func TestTrailingToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +func TestEntityInsideCDATA(t *testing.T) { + input := `` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +var characterTests = []struct { + in string + err string +}{ + {"\x12", "illegal character code U+0012"}, + {"\x0b", "illegal character code U+000B"}, + {"\xef\xbf\xbe", "illegal character code U+FFFE"}, + {"\r\n\x07", "illegal character code U+0007"}, + {"what's up", "expected attribute name in element"}, + {"&abc\x01;", "invalid character entity &abc (no semicolon)"}, + {"&\x01;", "invalid character entity & (no semicolon)"}, + {"&\xef\xbf\xbe;", "invalid character entity &\uFFFE;"}, + {"&hello;", "invalid character entity &hello;"}, +} + +func TestDisallowedCharacters(t *testing.T) { + + for i, tt := range characterTests { + d := NewDecoder(strings.NewReader(tt.in)) + var err error + + for err == nil { + _, err = d.Token() + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err) + } + if synerr.Msg != tt.err { + t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg) + } + } +} + +type procInstEncodingTest struct { + expect, got string +} + +var procInstTests = []struct { + input string + expect [2]string +}{ + {`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}}, + {`encoding="FOO" `, [2]string{"", "FOO"}}, +} + +func TestProcInstEncoding(t *testing.T) { + for _, test := range procInstTests { + if got := procInst("version", test.input); got != test.expect[0] { + t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0]) + } + if got := procInst("encoding", test.input); got != test.expect[1] { + t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1]) + } + } +} + +// Ensure that directives with comments include the complete +// text of any nested directives. 
+ +var directivesWithCommentsInput = ` +]> +]> + --> --> []> +` + +var directivesWithCommentsTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestDirectivesWithComments(t *testing.T) { + d := NewDecoder(strings.NewReader(directivesWithCommentsInput)) + + for i, want := range directivesWithCommentsTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +// Writer whose Write method always returns an error. +type errWriter struct{} + +func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") } + +func TestEscapeTextIOErrors(t *testing.T) { + expectErr := "unwritable" + err := EscapeText(errWriter{}, []byte{'A'}) + + if err == nil || err.Error() != expectErr { + t.Errorf("have %v, want %v", err, expectErr) + } +} + +func TestEscapeTextInvalidChar(t *testing.T) { + input := []byte("A \x00 terminated string.") + expected := "A \uFFFD terminated string." + + buff := new(bytes.Buffer) + if err := EscapeText(buff, input); err != nil { + t.Fatalf("have %v, want nil", err) + } + text := buff.String() + + if text != expected { + t.Errorf("have %v, want %v", text, expected) + } +} + +func TestIssue5880(t *testing.T) { + type T []byte + data, err := Marshal(T{192, 168, 0, 1}) + if err != nil { + t.Errorf("Marshal error: %v", err) + } + if !utf8.Valid(data) { + t.Errorf("Marshal generated invalid UTF-8: %x", data) + } +} diff --git a/vendor/golang.org/x/net/webdav/litmus_test_server.go b/vendor/golang.org/x/net/webdav/litmus_test_server.go new file mode 100644 index 000000000..514db5dd1 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/litmus_test_server.go @@ -0,0 +1,94 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program is a server for the WebDAV 'litmus' compliance test at +http://www.webdav.org/neon/litmus/ +To run the test: + +go run litmus_test_server.go + +and separately, from the downloaded litmus-xxx directory: + +make URL=http://localhost:9999/ check +*/ +package main + +import ( + "flag" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/webdav" +) + +var port = flag.Int("port", 9999, "server port") + +func main() { + flag.Parse() + log.SetFlags(0) + h := &webdav.Handler{ + FileSystem: webdav.NewMemFS(), + LockSystem: webdav.NewMemLS(), + Logger: func(r *http.Request, err error) { + litmus := r.Header.Get("X-Litmus") + if len(litmus) > 19 { + litmus = litmus[:16] + "..." + } + + switch r.Method { + case "COPY", "MOVE": + dst := "" + if u, err := url.Parse(r.Header.Get("Destination")); err == nil { + dst = u.Path + } + o := r.Header.Get("Overwrite") + log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err) + default: + log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err) + } + }, + } + + // The next line would normally be: + // http.Handle("/", h) + // but we wrap that HTTP handler h to cater for a special case. + // + // The propfind_invalid2 litmus test case expects an empty namespace prefix + // declaration to be an error. The FAQ in the webdav litmus test says: + // + // "What does the "propfind_invalid2" test check for?... 
+ // + // If a request was sent with an XML body which included an empty namespace + // prefix declaration (xmlns:ns1=""), then the server must reject that with + // a "400 Bad Request" response, as it is invalid according to the XML + // Namespace specification." + // + // On the other hand, the Go standard library's encoding/xml package + // accepts an empty xmlns namespace, as per the discussion at + // https://github.com/golang/go/issues/8068 + // + // Empty namespaces seem disallowed in the second (2006) edition of the XML + // standard, but allowed in a later edition. The grammar differs between + // http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and + // http://www.w3.org/TR/REC-xml-names/#dt-prefix + // + // Thus, we assume that the propfind_invalid2 test is obsolete, and + // hard-code the 400 Bad Request response that the test expects. + http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" { + http.Error(w, "400 Bad Request", http.StatusBadRequest) + return + } + h.ServeHTTP(w, r) + })) + + addr := fmt.Sprintf(":%d", *port) + log.Printf("Serving %v", addr) + log.Fatal(http.ListenAndServe(addr, nil)) +} diff --git a/vendor/golang.org/x/net/webdav/lock.go b/vendor/golang.org/x/net/webdav/lock.go new file mode 100644 index 000000000..344ac5cea --- /dev/null +++ b/vendor/golang.org/x/net/webdav/lock.go @@ -0,0 +1,445 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "container/heap" + "errors" + "strconv" + "strings" + "sync" + "time" +) + +var ( + // ErrConfirmationFailed is returned by a LockSystem's Confirm method. + ErrConfirmationFailed = errors.New("webdav: confirmation failed") + // ErrForbidden is returned by a LockSystem's Unlock method. + ErrForbidden = errors.New("webdav: forbidden") + // ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods. + ErrLocked = errors.New("webdav: locked") + // ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods. + ErrNoSuchLock = errors.New("webdav: no such lock") +) + +// Condition can match a WebDAV resource, based on a token or ETag. +// Exactly one of Token and ETag should be non-empty. +type Condition struct { + Not bool + Token string + ETag string +} + +// LockSystem manages access to a collection of named resources. The elements +// in a lock name are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +type LockSystem interface { + // Confirm confirms that the caller can claim all of the locks specified by + // the given conditions, and that holding the union of all of those locks + // gives exclusive access to all of the named resources. Up to two resources + // can be named. Empty names are ignored. + // + // Exactly one of release and err will be non-nil. If release is non-nil, + // all of the requested locks are held until release is called. Calling + // release does not unlock the lock, in the WebDAV UNLOCK sense, but once + // Confirm has confirmed that a lock claim is valid, that lock cannot be + // Confirmed again until it has been released. + // + // If Confirm returns ErrConfirmationFailed then the Handler will continue + // to try any other set of locks presented (a WebDAV HTTP request can + // present more than one set of locks). 
If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error) + + // Create creates a lock with the given depth, duration, owner and root + // (name). The depth will either be negative (meaning infinite) or zero. + // + // If Create returns ErrLocked then the Handler will write a "423 Locked" + // HTTP status. If it returns any other non-nil error, the Handler will + // write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + // + // The token returned identifies the created lock. It should be an absolute + // URI as defined by RFC 3986, Section 4.3. In particular, it should not + // contain whitespace. + Create(now time.Time, details LockDetails) (token string, err error) + + // Refresh refreshes the lock with the given token. + // + // If Refresh returns ErrLocked then the Handler will write a "423 Locked" + // HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write + // a "412 Precondition Failed" HTTP Status. If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) + + // Unlock unlocks the lock with the given token. + // + // If Unlock returns ErrForbidden then the Handler will write a "403 + // Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler + // will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock + // then the Handler will write a "409 Conflict" HTTP Status. If it returns + // any other non-nil error, the Handler will write a "500 Internal Server + // Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for + // when to use each error. + Unlock(now time.Time, token string) error +} + +// LockDetails are a lock's metadata. +type LockDetails struct { + // Root is the root resource name being locked. For a zero-depth lock, the + // root is the only resource being locked. + Root string + // Duration is the lock timeout. A negative duration means infinite. + Duration time.Duration + // OwnerXML is the verbatim XML given in a LOCK HTTP request. + // + // TODO: does the "verbatim" nature play well with XML namespaces? + // Does the OwnerXML field need to have more structure? See + // https://codereview.appspot.com/175140043/#msg2 + OwnerXML string + // ZeroDepth is whether the lock has zero depth. If it does not have zero + // depth, it has infinite depth. + ZeroDepth bool +} + +// NewMemLS returns a new in-memory LockSystem. +func NewMemLS() LockSystem { + return &memLS{ + byName: make(map[string]*memLSNode), + byToken: make(map[string]*memLSNode), + gen: uint64(time.Now().Unix()), + } +} + +type memLS struct { + mu sync.Mutex + byName map[string]*memLSNode + byToken map[string]*memLSNode + gen uint64 + // byExpiry only contains those nodes whose LockDetails have a finite + // Duration and are yet to expire. 
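// Editor's note (an illustrative aside, not part of the vendored file): nodes
// are also pulled out of this heap while they are held by a Confirm call (see
// hold and unhold below), and each node caches its heap position in
// byExpiryIndex, with -1 meaning "not currently in the heap". That cached
// index is what lets Refresh and remove re-key or delete a node via
// heap.Remove instead of a linear scan.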
+ byExpiry byExpiry +} + +func (m *memLS) nextToken() string { + m.gen++ + return strconv.FormatUint(m.gen, 10) +} + +func (m *memLS) collectExpiredNodes(now time.Time) { + for len(m.byExpiry) > 0 { + if now.Before(m.byExpiry[0].expiry) { + break + } + m.remove(m.byExpiry[0]) + } +} + +func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + var n0, n1 *memLSNode + if name0 != "" { + if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil { + return nil, ErrConfirmationFailed + } + } + if name1 != "" { + if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil { + return nil, ErrConfirmationFailed + } + } + + // Don't hold the same node twice. + if n1 == n0 { + n1 = nil + } + + if n0 != nil { + m.hold(n0) + } + if n1 != nil { + m.hold(n1) + } + return func() { + m.mu.Lock() + defer m.mu.Unlock() + if n1 != nil { + m.unhold(n1) + } + if n0 != nil { + m.unhold(n0) + } + }, nil +} + +// lookup returns the node n that locks the named resource, provided that n +// matches at least one of the given conditions and that lock isn't held by +// another party. Otherwise, it returns nil. +// +// n may be a parent of the named resource, if n is an infinite depth lock. +func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) { + // TODO: support Condition.Not and Condition.ETag. + for _, c := range conditions { + n = m.byToken[c.Token] + if n == nil || n.held { + continue + } + if name == n.details.Root { + return n + } + if n.details.ZeroDepth { + continue + } + if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") { + return n + } + } + return nil +} + +func (m *memLS) hold(n *memLSNode) { + if n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = true + if n.details.Duration >= 0 && n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func (m *memLS) unhold(n *memLSNode) { + if !n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = false + if n.details.Duration >= 0 { + heap.Push(&m.byExpiry, n) + } +} + +func (m *memLS) Create(now time.Time, details LockDetails) (string, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + details.Root = slashClean(details.Root) + + if !m.canCreate(details.Root, details.ZeroDepth) { + return "", ErrLocked + } + n := m.create(details.Root) + n.token = m.nextToken() + m.byToken[n.token] = n + n.details = details + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.token, nil +} + +func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return LockDetails{}, ErrNoSuchLock + } + if n.held { + return LockDetails{}, ErrLocked + } + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } + n.details.Duration = duration + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.details, nil +} + +func (m *memLS) Unlock(now time.Time, token string) error { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return ErrNoSuchLock + } + if n.held { + return ErrLocked + } + m.remove(n) + return nil +} + +func (m *memLS) canCreate(name string, zeroDepth bool) bool { + return walkToRoot(name, 
func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + return true + } + if first { + if n.token != "" { + // The target node is already locked. + return false + } + if !zeroDepth { + // The requested lock depth is infinite, and the fact that n exists + // (n != nil) means that a descendent of the target node is locked. + return false + } + } else if n.token != "" && !n.details.ZeroDepth { + // An ancestor of the target node is locked with infinite depth. + return false + } + return true + }) +} + +func (m *memLS) create(name string) (ret *memLSNode) { + walkToRoot(name, func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + n = &memLSNode{ + details: LockDetails{ + Root: name0, + }, + byExpiryIndex: -1, + } + m.byName[name0] = n + } + n.refCount++ + if first { + ret = n + } + return true + }) + return ret +} + +func (m *memLS) remove(n *memLSNode) { + delete(m.byToken, n.token) + n.token = "" + walkToRoot(n.details.Root, func(name0 string, first bool) bool { + x := m.byName[name0] + x.refCount-- + if x.refCount == 0 { + delete(m.byName, name0) + } + return true + }) + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func walkToRoot(name string, f func(name0 string, first bool) bool) bool { + for first := true; ; first = false { + if !f(name, first) { + return false + } + if name == "/" { + break + } + name = name[:strings.LastIndex(name, "/")] + if name == "" { + name = "/" + } + } + return true +} + +type memLSNode struct { + // details are the lock metadata. Even if this node's name is not explicitly locked, + // details.Root will still equal the node's name. + details LockDetails + // token is the unique identifier for this node's lock. An empty token means that + // this node is not explicitly locked. + token string + // refCount is the number of self-or-descendent nodes that are explicitly locked. + refCount int + // expiry is when this node's lock expires. + expiry time.Time + // byExpiryIndex is the index of this node in memLS.byExpiry. It is -1 + // if this node does not expire, or has expired. + byExpiryIndex int + // held is whether this node's lock is actively held by a Confirm call. + held bool +} + +type byExpiry []*memLSNode + +func (b *byExpiry) Len() int { + return len(*b) +} + +func (b *byExpiry) Less(i, j int) bool { + return (*b)[i].expiry.Before((*b)[j].expiry) +} + +func (b *byExpiry) Swap(i, j int) { + (*b)[i], (*b)[j] = (*b)[j], (*b)[i] + (*b)[i].byExpiryIndex = i + (*b)[j].byExpiryIndex = j +} + +func (b *byExpiry) Push(x interface{}) { + n := x.(*memLSNode) + n.byExpiryIndex = len(*b) + *b = append(*b, n) +} + +func (b *byExpiry) Pop() interface{} { + i := len(*b) - 1 + n := (*b)[i] + (*b)[i] = nil + n.byExpiryIndex = -1 + *b = (*b)[:i] + return n +} + +const infiniteTimeout = -1 + +// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is +// empty, an infiniteTimeout is returned. 
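// Editor's note (an illustrative aside, not part of the vendored file): the
// behaviour implemented below, as exercised by TestParseTimeout further on in
// this change:
//
//	parseTimeout("")                            -> infiniteTimeout, nil
//	parseTimeout("Infinite")                    -> infiniteTimeout, nil
//	parseTimeout("Second-123")                  -> 123 * time.Second, nil
//	parseTimeout("Infinite, Second-4100000000") -> infiniteTimeout, nil (only the first comma-separated value is considered)
//	parseTimeout("Second-4294967296")           -> 0, errInvalidTimeout (exceeds 2^32-1)
//	parseTimeout("junk")                        -> 0, errInvalidTimeout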
+func parseTimeout(s string) (time.Duration, error) { + if s == "" { + return infiniteTimeout, nil + } + if i := strings.IndexByte(s, ','); i >= 0 { + s = s[:i] + } + s = strings.TrimSpace(s) + if s == "Infinite" { + return infiniteTimeout, nil + } + const pre = "Second-" + if !strings.HasPrefix(s, pre) { + return 0, errInvalidTimeout + } + s = s[len(pre):] + if s == "" || s[0] < '0' || '9' < s[0] { + return 0, errInvalidTimeout + } + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || 1<<32-1 < n { + return 0, errInvalidTimeout + } + return time.Duration(n) * time.Second, nil +} diff --git a/vendor/golang.org/x/net/webdav/lock_test.go b/vendor/golang.org/x/net/webdav/lock_test.go new file mode 100644 index 000000000..5cf14cda4 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/lock_test.go @@ -0,0 +1,731 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "fmt" + "math/rand" + "path" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" +) + +func TestWalkToRoot(t *testing.T) { + testCases := []struct { + name string + want []string + }{{ + "/a/b/c/d", + []string{ + "/a/b/c/d", + "/a/b/c", + "/a/b", + "/a", + "/", + }, + }, { + "/a", + []string{ + "/a", + "/", + }, + }, { + "/", + []string{ + "/", + }, + }} + + for _, tc := range testCases { + var got []string + if !walkToRoot(tc.name, func(name0 string, first bool) bool { + if first != (len(got) == 0) { + t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got)) + return false + } + got = append(got, name0) + return true + }) { + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want) + } + } +} + +var lockTestDurations = []time.Duration{ + infiniteTimeout, // infiniteTimeout means to never expire. + 0, // A zero duration means to expire immediately. + 100 * time.Hour, // A very large duration will not expire in these tests. +} + +// lockTestNames are the names of a set of mutually compatible locks. For each +// name fragment: +// - _ means no explicit lock. +// - i means an infinite-depth lock, +// - z means a zero-depth lock, +var lockTestNames = []string{ + "/_/_/_/_/z", + "/_/_/i", + "/_/z", + "/_/z/i", + "/_/z/z", + "/_/z/_/i", + "/_/z/_/z", + "/i", + "/z", + "/z/_/i", + "/z/_/z", +} + +func lockTestZeroDepth(name string) bool { + switch name[len(name)-1] { + case 'i': + return false + case 'z': + return true + } + panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name)) +} + +func TestMemLSCanCreate(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + for _, name := range lockTestNames { + _, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + } + + wantCanCreate := func(name string, zeroDepth bool) bool { + for _, n := range lockTestNames { + switch { + case n == name: + // An existing lock has the same name as the proposed lock. + return false + case strings.HasPrefix(n, name): + // An existing lock would be a child of the proposed lock, + // which conflicts if the proposed lock has infinite depth. + if !zeroDepth { + return false + } + case strings.HasPrefix(name, n): + // An existing lock would be an ancestor of the proposed lock, + // which conflicts if the ancestor has infinite depth. 
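// Editor's note (an illustrative aside, not part of the vendored test file):
// with the naming convention above, the existing infinite-depth lock at "/i"
// means canCreate("/i/x", ...) must be false for either proposed depth, while
// the zero-depth lock at "/z" only rules out a new lock rooted at exactly
// "/z", so canCreate("/z/x", ...) can still succeed.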
+ if n[len(n)-1] == 'i' { + return false + } + } + } + return true + } + + var check func(int, string) + check = func(recursion int, name string) { + for _, zeroDepth := range []bool{false, true} { + got := m.canCreate(name, zeroDepth) + want := wantCanCreate(name, zeroDepth) + if got != want { + t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want) + } + } + if recursion == 6 { + return + } + if name != "/" { + name += "/" + } + for _, c := range "_iz" { + check(recursion+1, name+string(c)) + } + } + check(0, "/") +} + +func TestMemLSLookup(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + badToken := m.nextToken() + t.Logf("badToken=%q", badToken) + + for _, name := range lockTestNames { + token, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token) + } + + baseNames := append([]string{"/a", "/b/c"}, lockTestNames...) + for _, baseName := range baseNames { + for _, suffix := range []string{"", "/0", "/1/2/3"} { + name := baseName + suffix + + goodToken := "" + base := m.byName[baseName] + if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) { + goodToken = base.token + } + + for _, token := range []string{badToken, goodToken} { + if token == "" { + continue + } + + got := m.lookup(name, Condition{Token: token}) + want := base + if token == badToken { + want = nil + } + if got != want { + t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p", + name, token, token == badToken, got, want) + } + } + } + } +} + +func TestMemLSConfirm(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + alice, err := m.Create(now, LockDetails{ + Root: "/alice", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + tweedle, err := m.Create(now, LockDetails{ + Root: "/tweedle", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + + // Test a mismatch between name and condition. + _, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (mismatch): inconsistent state: %v", err) + } + + // Test two names (that fall under the same lock) in the one Confirm call. + release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (twins): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (twins): inconsistent state: %v", err) + } + release() + if err := m.consistent(); err != nil { + t.Fatalf("release (twins): inconsistent state: %v", err) + } + + // Test the same two names in overlapping Confirm / release calls. 
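// Editor's note (an illustrative aside, not part of the vendored test file):
// "overlapping" here means that while the release function returned for
// /tweedle/dee is still outstanding, a second Confirm against the same
// tweedle lock (for /tweedle/dum) must fail with ErrConfirmationFailed, and
// only succeeds again once the first confirmation has been released.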
+ releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #0): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err) + } + + _, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err) + } + + releaseDee() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #2): inconsistent state: %v", err) + } + + releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #3): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err) + } + + // Test that you can't unlock a held lock. + err = m.Unlock(now, tweedle) + if err != ErrLocked { + t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err) + } + + releaseDum() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #5): inconsistent state: %v", err) + } + + err = m.Unlock(now, tweedle) + if err != nil { + t.Fatalf("Unlock (sequence #6): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err) + } +} + +func TestMemLSNonCanonicalRoot(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + token, err := m.Create(now, LockDetails{ + Root: "/foo/./bar//", + Duration: 1 * time.Second, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + if err := m.Unlock(now, token); err != nil { + t.Fatalf("Unlock: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock: inconsistent state: %v", err) + } +} + +func TestMemLSExpiry(t *testing.T) { + m := NewMemLS().(*memLS) + testCases := []string{ + "setNow 0", + "create /a.5", + "want /a.5", + "create /c.6", + "want /a.5 /c.6", + "create /a/b.7", + "want /a.5 /a/b.7 /c.6", + "setNow 4", + "want /a.5 /a/b.7 /c.6", + "setNow 5", + "want /a/b.7 /c.6", + "setNow 6", + "want /a/b.7", + "setNow 7", + "want ", + "setNow 8", + "want ", + "create /a.12", + "create /b.13", + "create /c.15", + "create /a/d.16", + "want /a.12 /a/d.16 /b.13 /c.15", + "refresh /a.14", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 12", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 13", + "want /a.14 /a/d.16 /c.15", + "setNow 14", + "want /a/d.16 /c.15", + "refresh /a/d.20", + "refresh /c.20", + "want /a/d.20 /c.20", + "setNow 20", + "want ", + } + + tokens := map[string]string{} + zTime := time.Unix(0, 0) + now := zTime + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create", "refresh": + parts := strings.Split(arg, ".") + if len(parts) != 2 { + t.Fatalf("test case #%d %q: invalid create", i, tc) + } + root := parts[0] + d, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now) + + switch op { + case "create": + token, err := m.Create(now, LockDetails{ + Root: root, + Duration: dur, + 
ZeroDepth: true, + }) + if err != nil { + t.Fatalf("test case #%d %q: Create: %v", i, tc, err) + } + tokens[root] = token + + case "refresh": + token := tokens[root] + if token == "" { + t.Fatalf("test case #%d %q: no token for %q", i, tc, root) + } + got, err := m.Refresh(now, token, dur) + if err != nil { + t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err) + } + want := LockDetails{ + Root: root, + Duration: dur, + ZeroDepth: true, + } + if got != want { + t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want) + } + } + + case "setNow": + d, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + now = time.Unix(0, 0).Add(time.Duration(d) * time.Second) + + case "want": + m.mu.Lock() + m.collectExpiredNodes(now) + got := make([]string, 0, len(m.byToken)) + for _, n := range m.byToken { + got = append(got, fmt.Sprintf("%s.%d", + n.details.Root, n.expiry.Sub(zTime)/time.Second)) + } + m.mu.Unlock() + sort.Strings(got) + want := []string{} + if arg != "" { + want = strings.Split(arg, " ") + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want) + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err) + } + } +} + +func TestMemLS(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + rng := rand.New(rand.NewSource(0)) + tokens := map[string]string{} + nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0 + const N = 2000 + + for i := 0; i < N; i++ { + name := lockTestNames[rng.Intn(len(lockTestNames))] + duration := lockTestDurations[rng.Intn(len(lockTestDurations))] + confirmed, unlocked := false, false + + // If the name was already locked, we randomly confirm/release, refresh + // or unlock it. Otherwise, we create a lock. + token := tokens[name] + if token != "" { + switch rng.Intn(3) { + case 0: + confirmed = true + nConfirm++ + release, err := m.Confirm(now, name, "", Condition{Token: token}) + if err != nil { + t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err) + } + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + release() + + case 1: + nRefresh++ + if _, err := m.Refresh(now, token, duration); err != nil { + t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err) + } + + case 2: + unlocked = true + nUnlock++ + if err := m.Unlock(now, token); err != nil { + t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err) + } + } + + } else { + nCreate++ + var err error + token, err = m.Create(now, LockDetails{ + Root: name, + Duration: duration, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("iteration #%d: Create %q: %v", i, name, err) + } + } + + if !confirmed { + if duration == 0 || unlocked { + // A zero-duration lock should expire immediately and is + // effectively equivalent to being unlocked. 
+ tokens[name] = "" + } else { + tokens[name] = token + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + } + + if nConfirm < N/10 { + t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10) + } + if nCreate < N/10 { + t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10) + } + if nRefresh < N/10 { + t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10) + } + if nUnlock < N/10 { + t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10) + } +} + +func (m *memLS) consistent() error { + m.mu.Lock() + defer m.mu.Unlock() + + // If m.byName is non-empty, then it must contain an entry for the root "/", + // and its refCount should equal the number of locked nodes. + if len(m.byName) > 0 { + n := m.byName["/"] + if n == nil { + return fmt.Errorf(`non-empty m.byName does not contain the root "/"`) + } + if n.refCount != len(m.byToken) { + return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken)) + } + } + + for name, n := range m.byName { + // The map keys should be consistent with the node's copy of the key. + if n.details.Root != name { + return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name) + } + + // A name must be clean, and start with a "/". + if len(name) == 0 || name[0] != '/' { + return fmt.Errorf(`node name %q does not start with "/"`, name) + } + if name != path.Clean(name) { + return fmt.Errorf(`node name %q is not clean`, name) + } + + // A node's refCount should be positive. + if n.refCount <= 0 { + return fmt.Errorf("non-positive refCount for node at name %q", name) + } + + // A node's refCount should be the number of self-or-descendents that + // are locked (i.e. have a non-empty token). + var list []string + for name0, n0 := range m.byName { + // All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z', + // so strings.HasPrefix is equivalent to self-or-descendent name match. + // We don't have to worry about "/foo/bar" being a false positive match + // for "/foo/b". + if strings.HasPrefix(name0, name) && n0.token != "" { + list = append(list, name0) + } + } + if n.refCount != len(list) { + sort.Strings(list) + return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)", + name, n.refCount, list, len(list)) + } + + // A node n is in m.byToken if it has a non-empty token. + if n.token != "" { + if _, ok := m.byToken[n.token]; !ok { + return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token) + } + } + + // A node n is in m.byExpiry if it has a non-negative byExpiryIndex. + if n.byExpiryIndex >= 0 { + if n.byExpiryIndex >= len(m.byExpiry) { + return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry)) + } + if n != m.byExpiry[n.byExpiryIndex] { + return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex) + } + } + } + + for token, n := range m.byToken { + // The map keys should be consistent with the node's copy of the key. + if n.token != token { + return fmt.Errorf("node token %q != byToken map key %q", n.token, token) + } + + // Every node in m.byToken is in m.byName. 
+ if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root) + } + } + + for i, n := range m.byExpiry { + // The slice indices should be consistent with the node's copy of the index. + if n.byExpiryIndex != i { + return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i) + } + + // Every node in m.byExpiry is in m.byName. + if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root) + } + + // No node in m.byExpiry should be held. + if n.held { + return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root) + } + } + return nil +} + +func TestParseTimeout(t *testing.T) { + testCases := []struct { + s string + want time.Duration + wantErr error + }{{ + "", + infiniteTimeout, + nil, + }, { + "Infinite", + infiniteTimeout, + nil, + }, { + "Infinitesimal", + 0, + errInvalidTimeout, + }, { + "infinite", + 0, + errInvalidTimeout, + }, { + "Second-0", + 0 * time.Second, + nil, + }, { + "Second-123", + 123 * time.Second, + nil, + }, { + " Second-456 ", + 456 * time.Second, + nil, + }, { + "Second-4100000000", + 4100000000 * time.Second, + nil, + }, { + "junk", + 0, + errInvalidTimeout, + }, { + "Second-", + 0, + errInvalidTimeout, + }, { + "Second--1", + 0, + errInvalidTimeout, + }, { + "Second--123", + 0, + errInvalidTimeout, + }, { + "Second-+123", + 0, + errInvalidTimeout, + }, { + "Second-0x123", + 0, + errInvalidTimeout, + }, { + "second-123", + 0, + errInvalidTimeout, + }, { + "Second-4294967295", + 4294967295 * time.Second, + nil, + }, { + // Section 10.7 says that "The timeout value for TimeType "Second" + // must not be greater than 2^32-1." + "Second-4294967296", + 0, + errInvalidTimeout, + }, { + // This test case comes from section 9.10.9 of the spec. It says, + // + // "In this request, the client has specified that it desires an + // infinite-length lock, if available, otherwise a timeout of 4.1 + // billion seconds, if available." + // + // The Go WebDAV package always supports infinite length locks, + // and ignores the fallback after the comma. + "Infinite, Second-4100000000", + infiniteTimeout, + nil, + }} + + for _, tc := range testCases { + got, gotErr := parseTimeout(tc.s) + if got != tc.want || gotErr != tc.wantErr { + t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr) + } + } +} diff --git a/vendor/golang.org/x/net/webdav/prop.go b/vendor/golang.org/x/net/webdav/prop.go new file mode 100644 index 000000000..e36a3b31d --- /dev/null +++ b/vendor/golang.org/x/net/webdav/prop.go @@ -0,0 +1,418 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "mime" + "net/http" + "os" + "path/filepath" + "strconv" + + "golang.org/x/net/context" +) + +// Proppatch describes a property update instruction as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH +type Proppatch struct { + // Remove specifies whether this patch removes properties. If it does not + // remove them, it sets them. + Remove bool + // Props contains the properties to be set or removed. + Props []Property +} + +// Propstat describes a XML propstat element as defined in RFC 4918. 
+// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +type Propstat struct { + // Props contains the properties for which Status applies. + Props []Property + + // Status defines the HTTP status code of the properties in Prop. + // Allowed values include, but are not limited to the WebDAV status + // code extensions for HTTP/1.1. + // http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 + Status int + + // XMLError contains the XML representation of the optional error element. + // XML content within this field must not rely on any predefined + // namespace declarations or prefixes. If empty, the XML error element + // is omitted. + XMLError string + + // ResponseDescription contains the contents of the optional + // responsedescription field. If empty, the XML element is omitted. + ResponseDescription string +} + +// makePropstats returns a slice containing those of x and y whose Props slice +// is non-empty. If both are empty, it returns a slice containing an otherwise +// zero Propstat whose HTTP status code is 200 OK. +func makePropstats(x, y Propstat) []Propstat { + pstats := make([]Propstat, 0, 2) + if len(x.Props) != 0 { + pstats = append(pstats, x) + } + if len(y.Props) != 0 { + pstats = append(pstats, y) + } + if len(pstats) == 0 { + pstats = append(pstats, Propstat{ + Status: http.StatusOK, + }) + } + return pstats +} + +// DeadPropsHolder holds the dead properties of a resource. +// +// Dead properties are those properties that are explicitly defined. In +// comparison, live properties, such as DAV:getcontentlength, are implicitly +// defined by the underlying resource, and cannot be explicitly overridden or +// removed. See the Terminology section of +// http://www.webdav.org/specs/rfc4918.html#rfc.section.3 +// +// There is a whitelist of the names of live properties. This package handles +// all live properties, and will only pass non-whitelisted names to the Patch +// method of DeadPropsHolder implementations. +type DeadPropsHolder interface { + // DeadProps returns a copy of the dead properties held. + DeadProps() (map[xml.Name]Property, error) + + // Patch patches the dead properties held. + // + // Patching is atomic; either all or no patches succeed. It returns (nil, + // non-nil) if an internal server error occurred, otherwise the Propstats + // collectively contain one Property for each proposed patch Property. If + // all patches succeed, Patch returns a slice of length one and a Propstat + // element with a 200 OK HTTP status code. If none succeed, for reasons + // other than an internal server error, no Propstat has status 200 OK. + // + // For more details on when various HTTP status codes apply, see + // http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status + Patch([]Proppatch) ([]Propstat, error) +} + +// liveProps contains all supported, protected DAV: properties. +var liveProps = map[xml.Name]struct { + // findFn implements the propfind function of this property. If nil, + // it indicates a hidden property. + findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error) + // dir is true if the property applies to directories. 
+ dir bool +}{ + {Space: "DAV:", Local: "resourcetype"}: { + findFn: findResourceType, + dir: true, + }, + {Space: "DAV:", Local: "displayname"}: { + findFn: findDisplayName, + dir: true, + }, + {Space: "DAV:", Local: "getcontentlength"}: { + findFn: findContentLength, + dir: false, + }, + {Space: "DAV:", Local: "getlastmodified"}: { + findFn: findLastModified, + // http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified + // suggests that getlastmodified should only apply to GETable + // resources, and this package does not support GET on directories. + // + // Nonetheless, some WebDAV clients expect child directories to be + // sortable by getlastmodified date, so this value is true, not false. + // See golang.org/issue/15334. + dir: true, + }, + {Space: "DAV:", Local: "creationdate"}: { + findFn: nil, + dir: false, + }, + {Space: "DAV:", Local: "getcontentlanguage"}: { + findFn: nil, + dir: false, + }, + {Space: "DAV:", Local: "getcontenttype"}: { + findFn: findContentType, + dir: false, + }, + {Space: "DAV:", Local: "getetag"}: { + findFn: findETag, + // findETag implements ETag as the concatenated hex values of a file's + // modification time and size. This is not a reliable synchronization + // mechanism for directories, so we do not advertise getetag for DAV + // collections. + dir: false, + }, + + // TODO: The lockdiscovery property requires LockSystem to list the + // active locks on a resource. + {Space: "DAV:", Local: "lockdiscovery"}: {}, + {Space: "DAV:", Local: "supportedlock"}: { + findFn: findSupportedLock, + dir: true, + }, +} + +// TODO(nigeltao) merge props and allprop? + +// Props returns the status of the properties named pnames for resource name. +// +// Each Propstat has a unique status and each property name will only be part +// of one Propstat element. +func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pstatOK := Propstat{Status: http.StatusOK} + pstatNotFound := Propstat{Status: http.StatusNotFound} + for _, pn := range pnames { + // If this file has dead properties, check if they contain pn. + if dp, ok := deadProps[pn]; ok { + pstatOK.Props = append(pstatOK.Props, dp) + continue + } + // Otherwise, it must either be a live property or we don't know it. + if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) { + innerXML, err := prop.findFn(ctx, fs, ls, name, fi) + if err != nil { + return nil, err + } + pstatOK.Props = append(pstatOK.Props, Property{ + XMLName: pn, + InnerXML: []byte(innerXML), + }) + } else { + pstatNotFound.Props = append(pstatNotFound.Props, Property{ + XMLName: pn, + }) + } + } + return makePropstats(pstatOK, pstatNotFound), nil +} + +// Propnames returns the property names defined for resource name. 
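// Editor's note (an illustrative aside, not part of the vendored file): for a
// regular file on the in-memory file system, the propname test cases later in
// this change expect the live names DAV:resourcetype, displayname,
// getcontentlength, getlastmodified, getcontenttype, getetag and
// supportedlock, plus the names of any dead properties the file holds; for a
// directory the content-related names (getcontentlength, getcontenttype,
// getetag) are omitted.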
+func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps)) + for pn, prop := range liveProps { + if prop.findFn != nil && (prop.dir || !isDir) { + pnames = append(pnames, pn) + } + } + for pn := range deadProps { + pnames = append(pnames, pn) + } + return pnames, nil +} + +// Allprop returns the properties defined for resource name and the properties +// named in include. +// +// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined +// within the RFC plus dead properties. Other live properties should only be +// returned if they are named in 'include'. +// +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND +func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) { + pnames, err := propnames(ctx, fs, ls, name) + if err != nil { + return nil, err + } + // Add names from include if they are not already covered in pnames. + nameset := make(map[xml.Name]bool) + for _, pn := range pnames { + nameset[pn] = true + } + for _, pn := range include { + if !nameset[pn] { + pnames = append(pnames, pn) + } + } + return props(ctx, fs, ls, name, pnames) +} + +// Patch patches the properties of resource name. The return values are +// constrained in the same manner as DeadPropsHolder.Patch. +func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) { + conflict := false +loop: + for _, patch := range patches { + for _, p := range patch.Props { + if _, ok := liveProps[p.XMLName]; ok { + conflict = true + break loop + } + } + } + if conflict { + pstatForbidden := Propstat{ + Status: http.StatusForbidden, + XMLError: ``, + } + pstatFailedDep := Propstat{ + Status: StatusFailedDependency, + } + for _, patch := range patches { + for _, p := range patch.Props { + if _, ok := liveProps[p.XMLName]; ok { + pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName}) + } else { + pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName}) + } + } + } + return makePropstats(pstatForbidden, pstatFailedDep), nil + } + + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0) + if err != nil { + return nil, err + } + defer f.Close() + if dph, ok := f.(DeadPropsHolder); ok { + ret, err := dph.Patch(patches) + if err != nil { + return nil, err + } + // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that + // "The contents of the prop XML element must only list the names of + // properties to which the result in the status element applies." + for _, pstat := range ret { + for i, p := range pstat.Props { + pstat.Props[i] = Property{XMLName: p.XMLName} + } + } + return ret, nil + } + // The file doesn't implement the optional DeadPropsHolder interface, so + // all patches are forbidden. 
+ pstat := Propstat{Status: http.StatusForbidden} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + } + } + return []Propstat{pstat}, nil +} + +func escapeXML(s string) string { + for i := 0; i < len(s); i++ { + // As an optimization, if s contains only ASCII letters, digits or a + // few special characters, the escaped value is s itself and we don't + // need to allocate a buffer and convert between string and []byte. + switch c := s[i]; { + case c == ' ' || c == '_' || + ('+' <= c && c <= '9') || // Digits as well as + , - . and / + ('A' <= c && c <= 'Z') || + ('a' <= c && c <= 'z'): + continue + } + // Otherwise, go through the full escaping process. + var buf bytes.Buffer + xml.EscapeText(&buf, []byte(s)) + return buf.String() + } + return s +} + +func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + if fi.IsDir() { + return ``, nil + } + return "", nil +} + +func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + if slashClean(name) == "/" { + // Hide the real name of a possibly prefixed root directory. + return "", nil + } + return escapeXML(fi.Name()), nil +} + +func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return strconv.FormatInt(fi.Size(), 10), nil +} + +func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return fi.ModTime().Format(http.TimeFormat), nil +} + +func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return "", err + } + defer f.Close() + // This implementation is based on serveContent's code in the standard net/http package. + ctype := mime.TypeByExtension(filepath.Ext(name)) + if ctype != "" { + return ctype, nil + } + // Read a chunk to decide between utf-8 text and binary. + var buf [512]byte + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return "", err + } + ctype = http.DetectContentType(buf[:n]) + // Rewind file. + _, err = f.Seek(0, os.SEEK_SET) + return ctype, err +} + +func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + // The Apache http 2.4 web server by default concatenates the + // modification time and size of a file. We replicate the heuristic + // with nanosecond granularity. + return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil +} + +func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return `` + + `` + + `` + + `` + + ``, nil +} diff --git a/vendor/golang.org/x/net/webdav/prop_test.go b/vendor/golang.org/x/net/webdav/prop_test.go new file mode 100644 index 000000000..57d0e826f --- /dev/null +++ b/vendor/golang.org/x/net/webdav/prop_test.go @@ -0,0 +1,613 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package webdav + +import ( + "encoding/xml" + "fmt" + "net/http" + "os" + "reflect" + "sort" + "testing" + + "golang.org/x/net/context" +) + +func TestMemPS(t *testing.T) { + ctx := context.Background() + // calcProps calculates the getlastmodified and getetag DAV: property + // values in pstats for resource name in file-system fs. + calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error { + fi, err := fs.Stat(ctx, name) + if err != nil { + return err + } + for _, pst := range pstats { + for i, p := range pst.Props { + switch p.XMLName { + case xml.Name{Space: "DAV:", Local: "getlastmodified"}: + p.InnerXML = []byte(fi.ModTime().Format(http.TimeFormat)) + pst.Props[i] = p + case xml.Name{Space: "DAV:", Local: "getetag"}: + if fi.IsDir() { + continue + } + etag, err := findETag(ctx, fs, ls, name, fi) + if err != nil { + return err + } + p.InnerXML = []byte(etag) + pst.Props[i] = p + } + } + } + return nil + } + + const ( + lockEntry = `` + + `` + + `` + + `` + + `` + statForbiddenError = `` + ) + + type propOp struct { + op string + name string + pnames []xml.Name + patches []Proppatch + wantPnames []xml.Name + wantPropstats []Propstat + } + + testCases := []struct { + desc string + noDeadProps bool + buildfs []string + propOp []propOp + }{{ + desc: "propname", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propname", + name: "/dir", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "DAV:", Local: "getlastmodified"}, + }, + }, { + op: "propname", + name: "/file", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + }, + }}, + }, { + desc: "allprop dir and file", + buildfs: []string{"mkdir /dir", "write /file foobarbaz"}, + propOp: []propOp{{ + op: "allprop", + name: "/dir", + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(``), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("dir"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}, + }}, + }, { + op: "allprop", + name: "/file", + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("file"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"}, + InnerXML: []byte("9"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, + InnerXML: []byte("text/plain; charset=utf-8"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. 
+ }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}, + }}, + }, { + op: "allprop", + name: "/file", + pnames: []xml.Name{ + {"DAV:", "resourcetype"}, + {"foo", "bar"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("file"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"}, + InnerXML: []byte("9"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, + InnerXML: []byte("text/plain; charset=utf-8"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}}, { + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}}, + }, + }}, + }, { + desc: "propfind DAV:resourcetype", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "resourcetype"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(``), + }}, + }}, + }, { + op: "propfind", + name: "/file", + pnames: []xml.Name{{"DAV:", "resourcetype"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }}, + }}, + }}, + }, { + desc: "propfind unsupported DAV properties", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "getcontentlanguage"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlanguage"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "creationdate"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "creationdate"}, + }}, + }}, + }}, + }, { + desc: "propfind getetag for files but not for directories", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "getetag"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }, { + op: "propfind", + name: "/file", + pnames: []xml.Name{{"DAV:", "getetag"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. 
+ }}, + }}, + }}, + }, { + desc: "proppatch property on no-dead-properties file system", + buildfs: []string{"mkdir /dir"}, + noDeadProps: true, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }}, + }, { + desc: "proppatch dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + }}, + }, { + desc: "proppatch dead property with failed dependency", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }, { + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("xxx"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + }}, + }, { + Status: StatusFailedDependency, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "proppatch remove dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: 
[]Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }, { + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }}, + }, { + desc: "propname with dead property", + buildfs: []string{"touch /file"}, + propOp: []propOp{{ + op: "proppatch", + name: "/file", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propname", + name: "/file", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "foo", Local: "bar"}, + }, + }}, + }, { + desc: "proppatch remove unknown dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "bad: propfind unknown property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"foo:", "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo:", Local: "bar"}, + }}, + }}, + }}, + }} + + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + if tc.noDeadProps { + fs = noDeadPropsFS{fs} + } + ls := NewMemLS() + for _, op := range tc.propOp { + desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name) + if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil { + t.Fatalf("%s: calcProps: %v", desc, err) + } + + // Call property system. + var propstats []Propstat + switch op.op { + case "propname": + pnames, err := propnames(ctx, fs, ls, op.name) + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + sort.Sort(byXMLName(pnames)) + sort.Sort(byXMLName(op.wantPnames)) + if !reflect.DeepEqual(pnames, op.wantPnames) { + t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames) + } + continue + case "allprop": + propstats, err = allprop(ctx, fs, ls, op.name, op.pnames) + case "propfind": + propstats, err = props(ctx, fs, ls, op.name, op.pnames) + case "proppatch": + propstats, err = patch(ctx, fs, ls, op.name, op.patches) + default: + t.Fatalf("%s: %s not implemented", desc, op.op) + } + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + // Compare return values from allprop, propfind or proppatch. 
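// Editor's note (an illustrative aside, not part of the vendored test file):
// the sorting below (byPropname within each Propstat, then byStatus across
// Propstats) is what makes the reflect.DeepEqual comparison independent of
// the order in which the property system happens to return results.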
+ for _, pst := range propstats { + sort.Sort(byPropname(pst.Props)) + } + for _, pst := range op.wantPropstats { + sort.Sort(byPropname(pst.Props)) + } + sort.Sort(byStatus(propstats)) + sort.Sort(byStatus(op.wantPropstats)) + if !reflect.DeepEqual(propstats, op.wantPropstats) { + t.Errorf("%s: propstat\ngot %q\nwant %q", desc, propstats, op.wantPropstats) + } + } + } +} + +func cmpXMLName(a, b xml.Name) bool { + if a.Space != b.Space { + return a.Space < b.Space + } + return a.Local < b.Local +} + +type byXMLName []xml.Name + +func (b byXMLName) Len() int { return len(b) } +func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) } + +type byPropname []Property + +func (b byPropname) Len() int { return len(b) } +func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) } + +type byStatus []Propstat + +func (b byStatus) Len() int { return len(b) } +func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status } + +type noDeadPropsFS struct { + FileSystem +} + +func (fs noDeadPropsFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + f, err := fs.FileSystem.OpenFile(ctx, name, flag, perm) + if err != nil { + return nil, err + } + return noDeadPropsFile{f}, nil +} + +// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods +// provided by the underlying File implementation. +type noDeadPropsFile struct { + f File +} + +func (f noDeadPropsFile) Close() error { return f.f.Close() } +func (f noDeadPropsFile) Read(p []byte) (int, error) { return f.f.Read(p) } +func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error) { return f.f.Readdir(count) } +func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) } +func (f noDeadPropsFile) Stat() (os.FileInfo, error) { return f.f.Stat() } +func (f noDeadPropsFile) Write(p []byte) (int, error) { return f.f.Write(p) } diff --git a/vendor/golang.org/x/net/webdav/webdav.go b/vendor/golang.org/x/net/webdav/webdav.go new file mode 100644 index 000000000..7b56687fc --- /dev/null +++ b/vendor/golang.org/x/net/webdav/webdav.go @@ -0,0 +1,702 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package webdav provides a WebDAV server implementation. +package webdav // import "golang.org/x/net/webdav" + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" +) + +type Handler struct { + // Prefix is the URL path prefix to strip from WebDAV resource paths. + Prefix string + // FileSystem is the virtual file system. + FileSystem FileSystem + // LockSystem is the lock management system. + LockSystem LockSystem + // Logger is an optional error logger. If non-nil, it will be called + // for all HTTP requests. 
+ Logger func(*http.Request, error) +} + +func (h *Handler) stripPrefix(p string) (string, int, error) { + if h.Prefix == "" { + return p, http.StatusOK, nil + } + if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) { + return r, http.StatusOK, nil + } + return p, http.StatusNotFound, errPrefixMismatch +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + status, err := http.StatusBadRequest, errUnsupportedMethod + if h.FileSystem == nil { + status, err = http.StatusInternalServerError, errNoFileSystem + } else if h.LockSystem == nil { + status, err = http.StatusInternalServerError, errNoLockSystem + } else { + switch r.Method { + case "OPTIONS": + status, err = h.handleOptions(w, r) + case "GET", "HEAD", "POST": + status, err = h.handleGetHeadPost(w, r) + case "DELETE": + status, err = h.handleDelete(w, r) + case "PUT": + status, err = h.handlePut(w, r) + case "MKCOL": + status, err = h.handleMkcol(w, r) + case "COPY", "MOVE": + status, err = h.handleCopyMove(w, r) + case "LOCK": + status, err = h.handleLock(w, r) + case "UNLOCK": + status, err = h.handleUnlock(w, r) + case "PROPFIND": + status, err = h.handlePropfind(w, r) + case "PROPPATCH": + status, err = h.handleProppatch(w, r) + } + } + + if status != 0 { + w.WriteHeader(status) + if status != http.StatusNoContent { + w.Write([]byte(StatusText(status))) + } + } + if h.Logger != nil { + h.Logger(r, err) + } +} + +func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) { + token, err = h.LockSystem.Create(now, LockDetails{ + Root: root, + Duration: infiniteTimeout, + ZeroDepth: true, + }) + if err != nil { + if err == ErrLocked { + return "", StatusLocked, err + } + return "", http.StatusInternalServerError, err + } + return token, 0, nil +} + +func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) { + hdr := r.Header.Get("If") + if hdr == "" { + // An empty If header means that the client hasn't previously created locks. + // Even if this client doesn't care about locks, we still need to check that + // the resources aren't locked by another client, so we create temporary + // locks that would conflict with another client's locks. These temporary + // locks are unlocked at the end of the HTTP request. + now, srcToken, dstToken := time.Now(), "", "" + if src != "" { + srcToken, status, err = h.lock(now, src) + if err != nil { + return nil, status, err + } + } + if dst != "" { + dstToken, status, err = h.lock(now, dst) + if err != nil { + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + return nil, status, err + } + } + + return func() { + if dstToken != "" { + h.LockSystem.Unlock(now, dstToken) + } + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + }, 0, nil + } + + ih, ok := parseIfHeader(hdr) + if !ok { + return nil, http.StatusBadRequest, errInvalidIfHeader + } + // ih is a disjunction (OR) of ifLists, so any ifList will do. + for _, l := range ih.lists { + lsrc := l.resourceTag + if lsrc == "" { + lsrc = src + } else { + u, err := url.Parse(lsrc) + if err != nil { + continue + } + if u.Host != r.Host { + continue + } + lsrc, status, err = h.stripPrefix(u.Path) + if err != nil { + return nil, status, err + } + } + release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...) 
+ if err == ErrConfirmationFailed { + continue + } + if err != nil { + return nil, http.StatusInternalServerError, err + } + return release, 0, nil + } + // Section 10.4.1 says that "If this header is evaluated and all state lists + // fail, then the request must fail with a 412 (Precondition Failed) status." + // We follow the spec even though the cond_put_corrupt_token test case from + // the litmus test warns on seeing a 412 instead of a 423 (Locked). + return nil, http.StatusPreconditionFailed, ErrLocked +} + +func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ctx := getContext(r) + allow := "OPTIONS, LOCK, PUT, MKCOL" + if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil { + if fi.IsDir() { + allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND" + } else { + allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT" + } + } + w.Header().Set("Allow", allow) + // http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes + w.Header().Set("DAV", "1, 2") + // http://msdn.microsoft.com/en-au/library/cc250217.aspx + w.Header().Set("MS-Author-Via", "DAV") + return 0, nil +} + +func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + // TODO: check locks for read-only access?? + ctx := getContext(r) + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0) + if err != nil { + return http.StatusNotFound, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return http.StatusNotFound, err + } + if fi.IsDir() { + return http.StatusMethodNotAllowed, nil + } + etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + // Let ServeContent determine the Content-Type header. + http.ServeContent(w, r, reqPath, fi.ModTime(), f) + return 0, nil +} + +func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := getContext(r) + + // TODO: return MultiStatus where appropriate. + + // "godoc os RemoveAll" says that "If the path does not exist, RemoveAll + // returns nil (no error)." WebDAV semantics are that it should return a + // "404 Not Found". We therefore have to Stat before we RemoveAll. + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil { + return http.StatusMethodNotAllowed, err + } + return http.StatusNoContent, nil +} + +func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz' + // comments in http.checkEtag. 
+ ctx := getContext(r) + + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return http.StatusNotFound, err + } + _, copyErr := io.Copy(f, r.Body) + fi, statErr := f.Stat() + closeErr := f.Close() + // TODO(rost): Returning 405 Method Not Allowed might not be appropriate. + if copyErr != nil { + return http.StatusMethodNotAllowed, copyErr + } + if statErr != nil { + return http.StatusMethodNotAllowed, statErr + } + if closeErr != nil { + return http.StatusMethodNotAllowed, closeErr + } + etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + return http.StatusCreated, nil +} + +func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := getContext(r) + + if r.ContentLength > 0 { + return http.StatusUnsupportedMediaType, nil + } + if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusMethodNotAllowed, err + } + return http.StatusCreated, nil +} + +func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) { + hdr := r.Header.Get("Destination") + if hdr == "" { + return http.StatusBadRequest, errInvalidDestination + } + u, err := url.Parse(hdr) + if err != nil { + return http.StatusBadRequest, errInvalidDestination + } + if u.Host != r.Host { + return http.StatusBadGateway, errInvalidDestination + } + + src, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + + dst, status, err := h.stripPrefix(u.Path) + if err != nil { + return status, err + } + + if dst == "" { + return http.StatusBadGateway, errInvalidDestination + } + if dst == src { + return http.StatusForbidden, errDestinationEqualsSource + } + + ctx := getContext(r) + + if r.Method == "COPY" { + // Section 7.5.1 says that a COPY only needs to lock the destination, + // not both destination and source. Strictly speaking, this is racy, + // even though a COPY doesn't modify the source, if a concurrent + // operation modifies the source. However, the litmus test explicitly + // checks that COPYing a locked-by-another source is OK. + release, status, err := h.confirmLocks(r, "", dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.8.3 says that "The COPY method on a collection without a Depth + // header must act as if a Depth header with value "infinity" was included". + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.8.3 says that "A client may submit a Depth header on a + // COPY on a collection with a value of "0" or "infinity"." + return http.StatusBadRequest, errInvalidDepth + } + } + return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0) + } + + release, status, err := h.confirmLocks(r, src, dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.9.2 says that "The MOVE method on a collection must act as if + // a "Depth: infinity" header was used on it. 
A client must not submit a + // Depth header on a MOVE on a collection with any value but "infinity"." + if hdr := r.Header.Get("Depth"); hdr != "" { + if parseDepth(hdr) != infiniteDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T") +} + +func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) { + duration, err := parseTimeout(r.Header.Get("Timeout")) + if err != nil { + return http.StatusBadRequest, err + } + li, status, err := readLockInfo(r.Body) + if err != nil { + return status, err + } + + ctx := getContext(r) + token, ld, now, created := "", LockDetails{}, time.Now(), false + if li == (lockInfo{}) { + // An empty lockInfo means to refresh the lock. + ih, ok := parseIfHeader(r.Header.Get("If")) + if !ok { + return http.StatusBadRequest, errInvalidIfHeader + } + if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 { + token = ih.lists[0].conditions[0].Token + } + if token == "" { + return http.StatusBadRequest, errInvalidLockToken + } + ld, err = h.LockSystem.Refresh(now, token, duration) + if err != nil { + if err == ErrNoSuchLock { + return http.StatusPreconditionFailed, err + } + return http.StatusInternalServerError, err + } + + } else { + // Section 9.10.3 says that "If no Depth header is submitted on a LOCK request, + // then the request MUST act as if a "Depth:infinity" had been submitted." + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.10.3 says that "Values other than 0 or infinity must not be + // used with the Depth header on a LOCK method". + return http.StatusBadRequest, errInvalidDepth + } + } + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ld = LockDetails{ + Root: reqPath, + Duration: duration, + OwnerXML: li.Owner.InnerXML, + ZeroDepth: depth == 0, + } + token, err = h.LockSystem.Create(now, ld) + if err != nil { + if err == ErrLocked { + return StatusLocked, err + } + return http.StatusInternalServerError, err + } + defer func() { + if retErr != nil { + h.LockSystem.Unlock(now, token) + } + }() + + // Create the resource if it didn't previously exist. + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + // TODO: detect missing intermediate dirs and return http.StatusConflict? + return http.StatusInternalServerError, err + } + f.Close() + created = true + } + + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We add angle brackets. + w.Header().Set("Lock-Token", "<"+token+">") + } + + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + if created { + // This is "w.WriteHeader(http.StatusCreated)" and not "return + // http.StatusCreated, nil" because we write our own (XML) response to w + // and Handler.ServeHTTP would otherwise write "Created". + w.WriteHeader(http.StatusCreated) + } + writeLockInfo(w, token, ld) + return 0, nil +} + +func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) { + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We strip its angle brackets. 
+ t := r.Header.Get("Lock-Token") + if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' { + return http.StatusBadRequest, errInvalidLockToken + } + t = t[1 : len(t)-1] + + switch err = h.LockSystem.Unlock(time.Now(), t); err { + case nil: + return http.StatusNoContent, err + case ErrForbidden: + return http.StatusForbidden, err + case ErrLocked: + return StatusLocked, err + case ErrNoSuchLock: + return http.StatusConflict, err + default: + return http.StatusInternalServerError, err + } +} + +func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ctx := getContext(r) + fi, err := h.FileSystem.Stat(ctx, reqPath) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth == invalidDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + pf, status, err := readPropfind(r.Body) + if err != nil { + return status, err + } + + mw := multistatusWriter{w: w} + + walkFn := func(reqPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + var pstats []Propstat + if pf.Propname != nil { + pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath) + if err != nil { + return err + } + pstat := Propstat{Status: http.StatusOK} + for _, xmlname := range pnames { + pstat.Props = append(pstat.Props, Property{XMLName: xmlname}) + } + pstats = append(pstats, pstat) + } else if pf.Allprop != nil { + pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } else { + pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } + if err != nil { + return err + } + return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats)) + } + + walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn) + closeErr := mw.close() + if walkErr != nil { + return http.StatusInternalServerError, walkErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := getContext(r) + + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + patches, status, err := readProppatch(r.Body) + if err != nil { + return status, err + } + pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches) + if err != nil { + return http.StatusInternalServerError, err + } + mw := multistatusWriter{w: w} + writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats)) + closeErr := mw.close() + if writeErr != nil { + return http.StatusInternalServerError, writeErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func makePropstatResponse(href string, pstats []Propstat) *response { + resp := response{ + Href: []string{(&url.URL{Path: href}).EscapedPath()}, + Propstat: make([]propstat, 0, len(pstats)), + } + for _, p := range pstats { + var xmlErr *xmlError + if p.XMLError != "" { + xmlErr = &xmlError{InnerXML: 
[]byte(p.XMLError)} + } + resp.Propstat = append(resp.Propstat, propstat{ + Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)), + Prop: p.Props, + ResponseDescription: p.ResponseDescription, + Error: xmlErr, + }) + } + return &resp +} + +const ( + infiniteDepth = -1 + invalidDepth = -2 +) + +// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and +// infiniteDepth. Parsing any other string returns invalidDepth. +// +// Different WebDAV methods have further constraints on valid depths: +// - PROPFIND has no further restrictions, as per section 9.1. +// - COPY accepts only "0" or "infinity", as per section 9.8.3. +// - MOVE accepts only "infinity", as per section 9.9.2. +// - LOCK accepts only "0" or "infinity", as per section 9.10.3. +// These constraints are enforced by the handleXxx methods. +func parseDepth(s string) int { + switch s { + case "0": + return 0 + case "1": + return 1 + case "infinity": + return infiniteDepth + } + return invalidDepth +} + +// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 +const ( + StatusMulti = 207 + StatusUnprocessableEntity = 422 + StatusLocked = 423 + StatusFailedDependency = 424 + StatusInsufficientStorage = 507 +) + +func StatusText(code int) string { + switch code { + case StatusMulti: + return "Multi-Status" + case StatusUnprocessableEntity: + return "Unprocessable Entity" + case StatusLocked: + return "Locked" + case StatusFailedDependency: + return "Failed Dependency" + case StatusInsufficientStorage: + return "Insufficient Storage" + } + return http.StatusText(code) +} + +var ( + errDestinationEqualsSource = errors.New("webdav: destination equals source") + errDirectoryNotEmpty = errors.New("webdav: directory not empty") + errInvalidDepth = errors.New("webdav: invalid depth") + errInvalidDestination = errors.New("webdav: invalid destination") + errInvalidIfHeader = errors.New("webdav: invalid If header") + errInvalidLockInfo = errors.New("webdav: invalid lock info") + errInvalidLockToken = errors.New("webdav: invalid lock token") + errInvalidPropfind = errors.New("webdav: invalid propfind") + errInvalidProppatch = errors.New("webdav: invalid proppatch") + errInvalidResponse = errors.New("webdav: invalid response") + errInvalidTimeout = errors.New("webdav: invalid timeout") + errNoFileSystem = errors.New("webdav: no file system") + errNoLockSystem = errors.New("webdav: no lock system") + errNotADirectory = errors.New("webdav: not a directory") + errPrefixMismatch = errors.New("webdav: prefix mismatch") + errRecursionTooDeep = errors.New("webdav: recursion too deep") + errUnsupportedLockInfo = errors.New("webdav: unsupported lock info") + errUnsupportedMethod = errors.New("webdav: unsupported method") +) diff --git a/vendor/golang.org/x/net/webdav/webdav_test.go b/vendor/golang.org/x/net/webdav/webdav_test.go new file mode 100644 index 000000000..25e0d5421 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/webdav_test.go @@ -0,0 +1,344 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
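With webdav.go fully vendored above (the Handler type, its method dispatch, parseDepth and the extended WebDAV status codes), a minimal usage sketch may help orient readers; it is not part of the vendored sources or of this change. It assumes the package's NewMemFS and NewMemLS constructors, which the vendored tests in this diff also use, and an illustrative prefix and listen address.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/webdav"
)

func main() {
	h := &webdav.Handler{
		// Prefix is stripped from incoming URLs before they reach FileSystem.
		Prefix: "/dav/",
		// In-memory implementations shipped with the package; swap in real
		// FileSystem/LockSystem implementations for persistent storage.
		FileSystem: webdav.NewMemFS(),
		LockSystem: webdav.NewMemLS(),
		// Logger is called for every request, together with the handler's error, if any.
		Logger: func(r *http.Request, err error) {
			if err != nil {
				log.Printf("webdav: %s %s: %v", r.Method, r.URL.Path, err)
			}
		},
	}
	http.Handle("/dav/", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}

With this wiring, a request to /dav/foo is served against /foo in the in-memory file system, and unsupported methods fall through to the 400 path in ServeHTTP shown above.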
+ +package webdav + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "regexp" + "sort" + "strings" + "testing" + + "golang.org/x/net/context" +) + +// TODO: add tests to check XML responses with the expected prefix path +func TestPrefix(t *testing.T) { + const dst, blah = "Destination", "blah blah blah" + + // createLockBody comes from the example in Section 9.10.7. + const createLockBody = ` + + + + + http://example.org/~ejw/contact.html + + + ` + + do := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) { + var bodyReader io.Reader + if body != "" { + bodyReader = strings.NewReader(body) + } + req, err := http.NewRequest(method, urlStr, bodyReader) + if err != nil { + return nil, err + } + for len(headers) >= 2 { + req.Header.Add(headers[0], headers[1]) + headers = headers[2:] + } + res, err := http.DefaultTransport.RoundTrip(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != wantStatusCode { + return nil, fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode) + } + return res.Header, nil + } + + prefixes := []string{ + "/", + "/a/", + "/a/b/", + "/a/b/c/", + } + ctx := context.Background() + for _, prefix := range prefixes { + fs := NewMemFS() + h := &Handler{ + FileSystem: fs, + LockSystem: NewMemLS(), + } + mux := http.NewServeMux() + if prefix != "/" { + h.Prefix = prefix + } + mux.Handle(prefix, h) + srv := httptest.NewServer(mux) + defer srv.Close() + + // The script is: + // MKCOL /a + // MKCOL /a/b + // PUT /a/b/c + // COPY /a/b/c /a/b/d + // MKCOL /a/b/e + // MOVE /a/b/d /a/b/e/f + // LOCK /a/b/e/g + // PUT /a/b/e/g + // which should yield the (possibly stripped) filenames /a/b/c, + // /a/b/e/f and /a/b/e/g, plus their parent directories. 
+ + wantA := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusMovedPermanently, + "/a/b/": http.StatusNotFound, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a", "", wantA); err != nil { + t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err) + continue + } + + wantB := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusMovedPermanently, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a/b", "", wantB); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err) + continue + } + + wantC := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if _, err := do("PUT", srv.URL+"/a/b/c", blah, wantC); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err) + continue + } + + wantD := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if _, err := do("COPY", srv.URL+"/a/b/c", "", wantD, dst, srv.URL+"/a/b/d"); err != nil { + t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err) + continue + } + + wantE := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a/b/e", "", wantE); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err) + continue + } + + wantF := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MOVE", srv.URL+"/a/b/d", "", wantF, dst, srv.URL+"/a/b/e/f"); err != nil { + t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err) + continue + } + + var lockToken string + wantG := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if h, err := do("LOCK", srv.URL+"/a/b/e/g", createLockBody, wantG); err != nil { + t.Errorf("prefix=%-9q LOCK /a/b/e/g: %v", prefix, err) + continue + } else { + lockToken = h.Get("Lock-Token") + } + + ifHeader := fmt.Sprintf("<%s/a/b/e/g> (%s)", srv.URL, lockToken) + wantH := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("PUT", srv.URL+"/a/b/e/g", blah, wantH, "If", ifHeader); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/e/g: %v", prefix, err) + continue + } + + got, err := find(ctx, nil, fs, "/") + if err != nil { + t.Errorf("prefix=%-9q find: %v", prefix, err) + continue + } + sort.Strings(got) + want := map[string][]string{ + "/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f", "/a/b/e/g"}, + "/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f", "/b/e/g"}, + "/a/b/": {"/", "/c", "/e", "/e/f", "/e/g"}, + "/a/b/c/": {"/"}, + }[prefix] + if !reflect.DeepEqual(got, want) { + t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want) + continue + } + } +} + +func TestEscapeXML(t *testing.T) { + // These test cases aren't exhaustive, and there is more than one way to + // escape e.g. a quot (as """ or """) or an apos. We presume that + // the encoding/xml package tests xml.EscapeText more thoroughly. 
This test + // here is just a sanity check for this package's escapeXML function, and + // its attempt to provide a fast path (and avoid a bytes.Buffer allocation) + // when escaping filenames is obviously a no-op. + testCases := map[string]string{ + "": "", + " ": " ", + "&": "&", + "*": "*", + "+": "+", + ",": ",", + "-": "-", + ".": ".", + "/": "/", + "0": "0", + "9": "9", + ":": ":", + "<": "<", + ">": ">", + "A": "A", + "_": "_", + "a": "a", + "~": "~", + "\u0201": "\u0201", + "&": "&amp;", + "foo&baz": "foo&<b/ar>baz", + } + + for in, want := range testCases { + if got := escapeXML(in); got != want { + t.Errorf("in=%q: got %q, want %q", in, got, want) + } + } +} + +func TestFilenameEscape(t *testing.T) { + hrefRe := regexp.MustCompile(`([^<]*)`) + displayNameRe := regexp.MustCompile(`([^<]*)`) + do := func(method, urlStr string) (string, string, error) { + req, err := http.NewRequest(method, urlStr, nil) + if err != nil { + return "", "", err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + hrefMatch := hrefRe.FindStringSubmatch(string(b)) + if len(hrefMatch) != 2 { + return "", "", errors.New("D:href not found") + } + displayNameMatch := displayNameRe.FindStringSubmatch(string(b)) + if len(displayNameMatch) != 2 { + return "", "", errors.New("D:displayname not found") + } + + return hrefMatch[1], displayNameMatch[1], nil + } + + testCases := []struct { + name, wantHref, wantDisplayName string + }{{ + name: `/foo%bar`, + wantHref: `/foo%25bar`, + wantDisplayName: `foo%bar`, + }, { + name: `/こんにちわ世界`, + wantHref: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`, + wantDisplayName: `こんにちわ世界`, + }, { + name: `/Program Files/`, + wantHref: `/Program%20Files`, + wantDisplayName: `Program Files`, + }, { + name: `/go+lang`, + wantHref: `/go+lang`, + wantDisplayName: `go+lang`, + }, { + name: `/go&lang`, + wantHref: `/go&lang`, + wantDisplayName: `go&lang`, + }, { + name: `/goexclusive"` + Shared *struct{} `xml:"lockscope>shared"` + Write *struct{} `xml:"locktype>write"` + Owner owner `xml:"owner"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner +type owner struct { + InnerXML string `xml:",innerxml"` +} + +func readLockInfo(r io.Reader) (li lockInfo, status int, err error) { + c := &countingReader{r: r} + if err = ixml.NewDecoder(c).Decode(&li); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to refresh the lock. + // http://www.webdav.org/specs/rfc4918.html#refreshing-locks + return lockInfo{}, 0, nil + } + err = errInvalidLockInfo + } + return lockInfo{}, http.StatusBadRequest, err + } + // We only support exclusive (non-shared) write locks. In practice, these are + // the only types of locks that seem to matter. 
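	// Illustration only (not part of the upstream file): a body that this
	// function accepts is the exclusive write-lock request from section
	// 9.10.7 of RFC 4918, which the "section 9.10.7" test case below also uses:
	//
	//   <D:lockinfo xmlns:D='DAV:'>
	//     <D:lockscope><D:exclusive/></D:lockscope>
	//     <D:locktype><D:write/></D:locktype>
	//     <D:owner>
	//       <D:href>http://example.org/~ejw/contact.html</D:href>
	//     </D:owner>
	//   </D:lockinfo>
	//
	// Shared lock scopes, or bodies without a write lock type, fail the
	// check below with http.StatusNotImplemented.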
+ if li.Exclusive == nil || li.Shared != nil || li.Write == nil { + return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo + } + return li, 0, nil +} + +type countingReader struct { + n int + r io.Reader +} + +func (c *countingReader) Read(p []byte) (int, error) { + n, err := c.r.Read(p) + c.n += n + return n, err +} + +func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) { + depth := "infinity" + if ld.ZeroDepth { + depth = "0" + } + timeout := ld.Duration / time.Second + return fmt.Fprintf(w, "\n"+ + "\n"+ + " \n"+ + " \n"+ + " %s\n"+ + " %s\n"+ + " Second-%d\n"+ + " %s\n"+ + " %s\n"+ + "", + depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root), + ) +} + +func escape(s string) string { + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '&', '\'', '<', '>': + b := bytes.NewBuffer(nil) + ixml.EscapeText(b, []byte(s)) + return b.String() + } + } + return s +} + +// Next returns the next token, if any, in the XML stream of d. +// RFC 4918 requires to ignore comments, processing instructions +// and directives. +// http://www.webdav.org/specs/rfc4918.html#property_values +// http://www.webdav.org/specs/rfc4918.html#xml-extensibility +func next(d *ixml.Decoder) (ixml.Token, error) { + for { + t, err := d.Token() + if err != nil { + return t, err + } + switch t.(type) { + case ixml.Comment, ixml.Directive, ixml.ProcInst: + continue + default: + return t, nil + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind) +type propfindProps []xml.Name + +// UnmarshalXML appends the property names enclosed within start to pn. +// +// It returns an error if start does not contain any properties or if +// properties contain values. Character data between properties is ignored. +func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + for { + t, err := next(d) + if err != nil { + return err + } + switch t.(type) { + case ixml.EndElement: + if len(*pn) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + name := t.(ixml.StartElement).Name + t, err = next(d) + if err != nil { + return err + } + if _, ok := t.(ixml.EndElement); !ok { + return fmt.Errorf("unexpected token %T", t) + } + *pn = append(*pn, xml.Name(name)) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind +type propfind struct { + XMLName ixml.Name `xml:"DAV: propfind"` + Allprop *struct{} `xml:"DAV: allprop"` + Propname *struct{} `xml:"DAV: propname"` + Prop propfindProps `xml:"DAV: prop"` + Include propfindProps `xml:"DAV: include"` +} + +func readPropfind(r io.Reader) (pf propfind, status int, err error) { + c := countingReader{r: r} + if err = ixml.NewDecoder(&c).Decode(&pf); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to propfind allprop. 
+ // http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND + return propfind{Allprop: new(struct{})}, 0, nil + } + err = errInvalidPropfind + } + return propfind{}, http.StatusBadRequest, err + } + + if pf.Allprop == nil && pf.Include != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Prop != nil && pf.Propname != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + return pf, 0, nil +} + +// Property represents a single DAV resource property as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties +type Property struct { + // XMLName is the fully qualified name that identifies this property. + XMLName xml.Name + + // Lang is an optional xml:lang attribute. + Lang string `xml:"xml:lang,attr,omitempty"` + + // InnerXML contains the XML representation of the property value. + // See http://www.webdav.org/specs/rfc4918.html#property_values + // + // Property values of complex type or mixed-content must have fully + // expanded XML namespaces or be self-contained with according + // XML namespace declarations. They must not rely on any XML + // namespace declarations within the scope of the XML document, + // even including the DAV: namespace. + InnerXML []byte `xml:",innerxml"` +} + +// ixmlProperty is the same as the Property type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlProperty struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error +// See multistatusWriter for the "D:" namespace prefix. +type xmlError struct { + XMLName ixml.Name `xml:"D:error"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +// See multistatusWriter for the "D:" namespace prefix. +type propstat struct { + Prop []Property `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// ixmlPropstat is the same as the propstat type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlPropstat struct { + Prop []ixmlProperty `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace +// before encoding. See multistatusWriter. +func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error { + // Convert from a propstat to an ixmlPropstat. + ixmlPs := ixmlPropstat{ + Prop: make([]ixmlProperty, len(ps.Prop)), + Status: ps.Status, + Error: ps.Error, + ResponseDescription: ps.ResponseDescription, + } + for k, prop := range ps.Prop { + ixmlPs.Prop[k] = ixmlProperty{ + XMLName: ixml.Name(prop.XMLName), + Lang: prop.Lang, + InnerXML: prop.InnerXML, + } + } + + for k, prop := range ixmlPs.Prop { + if prop.XMLName.Space == "DAV:" { + prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local} + ixmlPs.Prop[k] = prop + } + } + // Distinct type to avoid infinite recursion of MarshalXML. 
+ type newpropstat ixmlPropstat + return e.EncodeElement(newpropstat(ixmlPs), start) +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response +// See multistatusWriter for the "D:" namespace prefix. +type response struct { + XMLName ixml.Name `xml:"D:response"` + Href []string `xml:"D:href"` + Propstat []propstat `xml:"D:propstat"` + Status string `xml:"D:status,omitempty"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MultistatusWriter marshals one or more Responses into a XML +// multistatus response. +// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus +// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as +// "DAV:" on this element, is prepended on the nested response, as well as on all +// its nested elements. All property names in the DAV: namespace are prefixed as +// well. This is because some versions of Mini-Redirector (on windows 7) ignore +// elements with a default namespace (no prefixed namespace). A less intrusive fix +// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177 +type multistatusWriter struct { + // ResponseDescription contains the optional responsedescription + // of the multistatus XML element. Only the latest content before + // close will be emitted. Empty response descriptions are not + // written. + responseDescription string + + w http.ResponseWriter + enc *ixml.Encoder +} + +// Write validates and emits a DAV response as part of a multistatus response +// element. +// +// It sets the HTTP status code of its underlying http.ResponseWriter to 207 +// (Multi-Status) and populates the Content-Type header. If r is the +// first, valid response to be written, Write prepends the XML representation +// of r with a multistatus tag. Callers must call close after the last response +// has been written. +func (w *multistatusWriter) write(r *response) error { + switch len(r.Href) { + case 0: + return errInvalidResponse + case 1: + if len(r.Propstat) > 0 != (r.Status == "") { + return errInvalidResponse + } + default: + if len(r.Propstat) > 0 || r.Status == "" { + return errInvalidResponse + } + } + err := w.writeHeader() + if err != nil { + return err + } + return w.enc.Encode(r) +} + +// writeHeader writes a XML multistatus start element on w's underlying +// http.ResponseWriter and returns the result of the write operation. +// After the first write attempt, writeHeader becomes a no-op. +func (w *multistatusWriter) writeHeader() error { + if w.enc != nil { + return nil + } + w.w.Header().Add("Content-Type", "text/xml; charset=utf-8") + w.w.WriteHeader(StatusMulti) + _, err := fmt.Fprintf(w.w, ``) + if err != nil { + return err + } + w.enc = ixml.NewEncoder(w.w) + return w.enc.EncodeToken(ixml.StartElement{ + Name: ixml.Name{ + Space: "DAV:", + Local: "multistatus", + }, + Attr: []ixml.Attr{{ + Name: ixml.Name{Space: "xmlns", Local: "D"}, + Value: "DAV:", + }}, + }) +} + +// Close completes the marshalling of the multistatus response. It returns +// an error if the multistatus response could not be completed. If both the +// return value and field enc of w are nil, then no multistatus response has +// been written. 
+func (w *multistatusWriter) close() error { + if w.enc == nil { + return nil + } + var end []ixml.Token + if w.responseDescription != "" { + name := ixml.Name{Space: "DAV:", Local: "responsedescription"} + end = append(end, + ixml.StartElement{Name: name}, + ixml.CharData(w.responseDescription), + ixml.EndElement{Name: name}, + ) + } + end = append(end, ixml.EndElement{ + Name: ixml.Name{Space: "DAV:", Local: "multistatus"}, + }) + for _, t := range end { + err := w.enc.EncodeToken(t) + if err != nil { + return err + } + } + return w.enc.Flush() +} + +var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} + +func xmlLang(s ixml.StartElement, d string) string { + for _, attr := range s.Attr { + if attr.Name == xmlLangName { + return attr.Value + } + } + return d +} + +type xmlValue []byte + +func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + // The XML value of a property can be arbitrary, mixed-content XML. + // To make sure that the unmarshalled value contains all required + // namespaces, we encode all the property value XML tokens into a + // buffer. This forces the encoder to redeclare any used namespaces. + var b bytes.Buffer + e := ixml.NewEncoder(&b) + for { + t, err := next(d) + if err != nil { + return err + } + if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name { + break + } + if err = e.EncodeToken(t); err != nil { + return err + } + } + err := e.Flush() + if err != nil { + return err + } + *v = b.Bytes() + return nil +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) +type proppatchProps []Property + +// UnmarshalXML appends the property names and values enclosed within start +// to ps. +// +// An xml:lang attribute that is defined either on the DAV:prop or property +// name XML element is propagated to the property's Lang field. +// +// UnmarshalXML returns an error if start does not contain any properties or if +// property values contain syntactically incorrect XML. +func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + lang := xmlLang(start, "") + for { + t, err := next(d) + if err != nil { + return err + } + switch elem := t.(type) { + case ixml.EndElement: + if len(*ps) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + p := Property{ + XMLName: xml.Name(t.(ixml.StartElement).Name), + Lang: xmlLang(t.(ixml.StartElement), lang), + } + err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem) + if err != nil { + return err + } + *ps = append(*ps, p) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove +type setRemove struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + Prop proppatchProps `xml:"DAV: prop"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate +type propertyupdate struct { + XMLName ixml.Name `xml:"DAV: propertyupdate"` + Lang string `xml:"xml:lang,attr,omitempty"` + SetRemove []setRemove `xml:",any"` +} + +func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) { + var pu propertyupdate + if err = ixml.NewDecoder(r).Decode(&pu); err != nil { + return nil, http.StatusBadRequest, err + } + for _, op := range pu.SetRemove { + remove := false + switch op.XMLName { + case ixml.Name{Space: "DAV:", Local: "set"}: + // No-op. 
+ case ixml.Name{Space: "DAV:", Local: "remove"}: + for _, p := range op.Prop { + if len(p.InnerXML) > 0 { + return nil, http.StatusBadRequest, errInvalidProppatch + } + } + remove = true + default: + return nil, http.StatusBadRequest, errInvalidProppatch + } + patches = append(patches, Proppatch{Remove: remove, Props: op.Prop}) + } + return patches, 0, nil +} diff --git a/vendor/golang.org/x/net/webdav/xml_test.go b/vendor/golang.org/x/net/webdav/xml_test.go new file mode 100644 index 000000000..a3d9e1ed8 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/xml_test.go @@ -0,0 +1,906 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "testing" + + ixml "golang.org/x/net/webdav/internal/xml" +) + +func TestReadLockInfo(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + wantLI lockInfo + wantStatus int + }{{ + "bad: junk", + "xxx", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid owner XML", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " no end tag \n" + + " \n" + + "", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid UTF-8", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " \xff \n" + + " \n" + + "", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #1", + "" + + "\n" + + " \n" + + " \n", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #2", + "" + + "\n" + + " \n" + + " \n" + + " \n", + lockInfo{}, + http.StatusBadRequest, + }, { + "good: empty", + "", + lockInfo{}, + 0, + }, { + "good: plain-text owner", + "" + + "\n" + + " \n" + + " \n" + + " gopher\n" + + "", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "gopher", + }, + }, + 0, + }, { + "section 9.10.7", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " http://example.org/~ejw/contact.html\n" + + " \n" + + "", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "\n http://example.org/~ejw/contact.html\n ", + }, + }, + 0, + }} + + for _, tc := range testCases { + li, status, err := readLockInfo(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus { + t.Errorf("%s:\ngot lockInfo=%v, status=%v\nwant lockInfo=%v, status=%v", + tc.desc, li, status, tc.wantLI, tc.wantStatus) + continue + } + } +} + +func TestReadPropfind(t *testing.T) { + testCases := []struct { + desc string + input string + wantPF propfind + wantStatus int + }{{ + desc: "propfind: propname", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: empty body means allprop", + input: "", + wantPF: propfind{ + Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: 
"propfind"}, + Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop followed by include", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: include followed by allprop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: prop with ignored comments", + input: "" + + "\n" + + " \n" + + " \n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored whitespace", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored mixed-content", + input: "" + + "\n" + + " foobar\n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propname with ignored element (section A.4)", + input: "" + + "\n" + + " \n" + + " *boss*\n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: bad: junk", + input: "xxx", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and allprop (section A.3)", + input: "" + + "\n" + + " " + + " " + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and prop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: allprop and prop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty propfind with ignored element (section A.4)", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty prop", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: prop with just chardata", + input: "" + + "\n" + + " foo\n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: interrupted prop", + input: "" + + "\n" + + " \n", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: malformed end element prop", + input: "" + + "\n" + + " \n", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with chardata value", + input: "" + + "\n" + + " bar\n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with whitespace value", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: include without allprop", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pf, status, err := readPropfind(strings.NewReader(tc.input)) + if tc.wantStatus 
!= 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus { + t.Errorf("%s:\ngot propfind=%v, status=%v\nwant propfind=%v, status=%v", + tc.desc, pf, status, tc.wantPF, tc.wantStatus) + continue + } + } +} + +func TestMultistatusWriter(t *testing.T) { + ///The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + responses []response + respdesc string + writeHeader bool + wantXML string + wantCode int + wantErr error + }{{ + desc: "section 9.2.2 (failed dependency)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Authors", + }, + }}, + Status: "HTTP/1.1 424 Failed Dependency", + }, { + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Copyright-Owner", + }, + }}, + Status: "HTTP/1.1 409 Conflict", + }}, + ResponseDescription: "Copyright Owner cannot be deleted or altered.", + }}, + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 424 Failed Dependency` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 409 Conflict` + + ` ` + + ` Copyright Owner cannot be deleted or altered.` + + `` + + ``, + wantCode: StatusMulti, + }, { + desc: "section 9.6.2 (lock-token-submitted)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Status: "HTTP/1.1 423 Locked", + Error: &xmlError{ + InnerXML: []byte(``), + }, + }}, + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` HTTP/1.1 423 Locked` + + ` ` + + ` ` + + ``, + wantCode: StatusMulti, + }, { + desc: "section 9.1.3", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "bigbox"}, + InnerXML: []byte(`` + + `` + + `Box type A` + + ``), + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "author"}, + InnerXML: []byte(`` + + `` + + `J.J. Johnson` + + ``), + }}, + Status: "HTTP/1.1 200 OK", + }, { + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "DingALing"}, + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "Random"}, + }}, + Status: "HTTP/1.1 403 Forbidden", + ResponseDescription: "The user does not have access to the DingALing property.", + }}, + }}, + respdesc: "There has been an access violation error.", + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` ` + + ` ` + + ` Box type A` + + ` J.J. 
Johnson` + + ` ` + + ` HTTP/1.1 200 OK` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 403 Forbidden` + + ` The user does not have access to the DingALing property.` + + ` ` + + ` ` + + ` There has been an access violation error.` + + ``, + wantCode: StatusMulti, + }, { + desc: "no response written", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "no response written (with description)", + respdesc: "too bad", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "empty multistatus with header", + writeHeader: true, + wantXML: ``, + wantCode: StatusMulti, + }, { + desc: "bad: no href", + responses: []response{{ + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and no status", + responses: []response{{ + Href: []string{"http://example.com/foo", "http://example.com/bar"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: one href and no propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: status with one href and propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + Status: "HTTP/1.1 200 OK", + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and propstat", + responses: []response{{ + Href: []string{ + "http://example.com/foo", + "http://example.com/bar", + }, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }} + + n := xmlNormalizer{omitWhitespace: true} +loop: + for _, tc := range testCases { + rec := httptest.NewRecorder() + w := multistatusWriter{w: rec, responseDescription: tc.respdesc} + if tc.writeHeader { + if err := w.writeHeader(); err != nil { + t.Errorf("%s: got writeHeader error %v, want nil", tc.desc, err) + continue + } + } + for _, r := range tc.responses { + if err := w.write(&r); err != nil { + if err != tc.wantErr { + t.Errorf("%s: got write error %v, want %v", + tc.desc, err, tc.wantErr) + } + continue loop + } + } + if err := w.close(); err != tc.wantErr { + t.Errorf("%s: got close error %v, want %v", + tc.desc, err, tc.wantErr) + continue + } + if rec.Code != tc.wantCode { + t.Errorf("%s: got HTTP status code %d, want %d\n", + tc.desc, rec.Code, tc.wantCode) + continue + } + gotXML := rec.Body.String() + eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML)) + if err != nil { + t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML) + } + } +} + +func TestReadProppatch(t *testing.T) { + ppStr := func(pps []Proppatch) string { + var outer []string + for _, pp := range pps { + var inner []string + for _, p := range pp.Props { + inner = append(inner, 
fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}", + p.XMLName, p.Lang, p.InnerXML)) + } + outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}", + pp.Remove, strings.Join(inner, ", "))) + } + return "[" + strings.Join(outer, ", ") + "]" + } + + testCases := []struct { + desc string + input string + wantPP []Proppatch + wantStatus int + }{{ + desc: "proppatch: section 9.2 (with simple property value)", + input: `` + + `` + + `` + + ` ` + + ` somevalue` + + ` ` + + ` ` + + ` ` + + ` ` + + ``, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"}, + "", + []byte(`somevalue`), + }}, + }, { + Remove: true, + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Copyright-Owner"}, + "", + nil, + }}, + }}, + }, { + desc: "proppatch: lang attribute on prop", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ``, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://example.com/ns", Local: "foo"}, + "en", + nil, + }}, + }}, + }, { + desc: "bad: remove with value", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ` Jim Whitehead` + + ` ` + + ` ` + + ` ` + + ``, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty propertyupdate", + input: `` + + `` + + ``, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty prop", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ``, + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pp, status, err := readProppatch(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if status != tc.wantStatus { + t.Errorf("%s: got status %d, want %d", tc.desc, status, tc.wantStatus) + continue + } + if !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus { + t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP)) + } + } +} + +func TestUnmarshalXMLValue(t *testing.T) { + testCases := []struct { + desc string + input string + wantVal string + }{{ + desc: "simple char data", + input: "foo", + wantVal: "foo", + }, { + desc: "empty element", + input: "", + wantVal: "", + }, { + desc: "preserve namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve root element namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve whitespace", + input: " \t ", + wantVal: " \t ", + }, { + desc: "preserve mixed content", + input: ` a `, + wantVal: ` a `, + }, { + desc: "section 9.2", + input: `` + + `` + + ` Jim Whitehead` + + ` Roy Fielding` + + ``, + wantVal: `` + + ` Jim Whitehead` + + ` Roy Fielding`, + }, { + desc: "section 4.3.1 (mixed content)", + input: `` + + `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of ]]>.` + + ` ` + + ``, + wantVal: `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of <RFC2518>.` + + ` `, + }} + + var n xmlNormalizer + for _, tc := range testCases { + d := ixml.NewDecoder(strings.NewReader(tc.input)) + var v xmlValue + if err := d.Decode(&v); err != nil { + t.Errorf("%s: got error %v, want nil", tc.desc, err) + continue + } + eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal)) + if err != nil { + 
t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal) + } + } +} + +// xmlNormalizer normalizes XML. +type xmlNormalizer struct { + // omitWhitespace instructs to ignore whitespace between element tags. + omitWhitespace bool + // omitComments instructs to ignore XML comments. + omitComments bool +} + +// normalize writes the normalized XML content of r to w. It applies the +// following rules +// +// * Rename namespace prefixes according to an internal heuristic. +// * Remove unnecessary namespace declarations. +// * Sort attributes in XML start elements in lexical order of their +// fully qualified name. +// * Remove XML directives and processing instructions. +// * Remove CDATA between XML tags that only contains whitespace, if +// instructed to do so. +// * Remove comments, if instructed to do so. +// +func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error { + d := ixml.NewDecoder(r) + e := ixml.NewEncoder(w) + for { + t, err := d.Token() + if err != nil { + if t == nil && err == io.EOF { + break + } + return err + } + switch val := t.(type) { + case ixml.Directive, ixml.ProcInst: + continue + case ixml.Comment: + if n.omitComments { + continue + } + case ixml.CharData: + if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 { + continue + } + case ixml.StartElement: + start, _ := ixml.CopyToken(val).(ixml.StartElement) + attr := start.Attr[:0] + for _, a := range start.Attr { + if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" { + continue + } + attr = append(attr, a) + } + sort.Sort(byName(attr)) + start.Attr = attr + t = start + } + err = e.EncodeToken(t) + if err != nil { + return err + } + } + return e.Flush() +} + +// equalXML tests for equality of the normalized XML contents of a and b. +func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) { + var buf bytes.Buffer + if err := n.normalize(&buf, a); err != nil { + return false, err + } + normA := buf.String() + buf.Reset() + if err := n.normalize(&buf, b); err != nil { + return false, err + } + normB := buf.String() + return normA == normB, nil +} + +type byName []ixml.Attr + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { + if a[i].Name.Space != a[j].Name.Space { + return a[i].Name.Space < a[j].Name.Space + } + return a[i].Name.Local < a[j].Name.Local +} diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go new file mode 100644 index 000000000..69a4ac7ee --- /dev/null +++ b/vendor/golang.org/x/net/websocket/client.go @@ -0,0 +1,106 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "io" + "net" + "net/http" + "net/url" +) + +// DialError is an error that occurs while dialling a websocket server. +type DialError struct { + *Config + Err error +} + +func (e *DialError) Error() string { + return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error() +} + +// NewConfig creates a new WebSocket config for client connection. 
+func NewConfig(server, origin string) (config *Config, err error) { + config = new(Config) + config.Version = ProtocolVersionHybi13 + config.Location, err = url.ParseRequestURI(server) + if err != nil { + return + } + config.Origin, err = url.ParseRequestURI(origin) + if err != nil { + return + } + config.Header = http.Header(make(map[string][]string)) + return +} + +// NewClient creates a new WebSocket client connection over rwc. +func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + err = hybiClientHandshake(config, br, bw) + if err != nil { + return + } + buf := bufio.NewReadWriter(br, bw) + ws = newHybiClientConn(config, buf, rwc) + return +} + +// Dial opens a new client connection to a WebSocket. +func Dial(url_, protocol, origin string) (ws *Conn, err error) { + config, err := NewConfig(url_, origin) + if err != nil { + return nil, err + } + if protocol != "" { + config.Protocol = []string{protocol} + } + return DialConfig(config) +} + +var portMap = map[string]string{ + "ws": "80", + "wss": "443", +} + +func parseAuthority(location *url.URL) string { + if _, ok := portMap[location.Scheme]; ok { + if _, _, err := net.SplitHostPort(location.Host); err != nil { + return net.JoinHostPort(location.Host, portMap[location.Scheme]) + } + } + return location.Host +} + +// DialConfig opens a new client connection to a WebSocket with a config. +func DialConfig(config *Config) (ws *Conn, err error) { + var client net.Conn + if config.Location == nil { + return nil, &DialError{config, ErrBadWebSocketLocation} + } + if config.Origin == nil { + return nil, &DialError{config, ErrBadWebSocketOrigin} + } + dialer := config.Dialer + if dialer == nil { + dialer = &net.Dialer{} + } + client, err = dialWithDialer(dialer, config) + if err != nil { + goto Error + } + ws, err = NewClient(config, client) + if err != nil { + client.Close() + goto Error + } + return + +Error: + return nil, &DialError{config, err} +} diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go new file mode 100644 index 000000000..2dab943a4 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/tls" + "net" +) + +func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { + switch config.Location.Scheme { + case "ws": + conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + + case "wss": + conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + + default: + err = ErrBadScheme + } + return +} diff --git a/vendor/golang.org/x/net/websocket/dial_test.go b/vendor/golang.org/x/net/websocket/dial_test.go new file mode 100644 index 000000000..aa03e30dd --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial_test.go @@ -0,0 +1,43 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/tls" + "fmt" + "log" + "net" + "net/http/httptest" + "testing" + "time" +) + +// This test depend on Go 1.3+ because in earlier versions the Dialer won't be +// used in TLS connections and a timeout won't be triggered. 
+func TestDialConfigTLSWithDialer(t *testing.T) { + tlsServer := httptest.NewTLSServer(nil) + tlsServerAddr := tlsServer.Listener.Addr().String() + log.Print("Test TLS WebSocket server listening on ", tlsServerAddr) + defer tlsServer.Close() + config, _ := NewConfig(fmt.Sprintf("wss://%s/echo", tlsServerAddr), "http://localhost") + config.Dialer = &net.Dialer{ + Deadline: time.Now().Add(-time.Minute), + } + config.TlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + _, err := DialConfig(config) + dialerr, ok := err.(*DialError) + if !ok { + t.Fatalf("DialError expected, got %#v", err) + } + neterr, ok := dialerr.Err.(*net.OpError) + if !ok { + t.Fatalf("net.OpError error expected, got %#v", dialerr.Err) + } + if !neterr.Timeout() { + t.Fatalf("expected timeout error, got %#v", neterr) + } +} diff --git a/vendor/golang.org/x/net/websocket/exampledial_test.go b/vendor/golang.org/x/net/websocket/exampledial_test.go new file mode 100644 index 000000000..72bb9d48e --- /dev/null +++ b/vendor/golang.org/x/net/websocket/exampledial_test.go @@ -0,0 +1,31 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket_test + +import ( + "fmt" + "log" + + "golang.org/x/net/websocket" +) + +// This example demonstrates a trivial client. +func ExampleDial() { + origin := "http://localhost/" + url := "ws://localhost:12345/ws" + ws, err := websocket.Dial(url, "", origin) + if err != nil { + log.Fatal(err) + } + if _, err := ws.Write([]byte("hello, world!\n")); err != nil { + log.Fatal(err) + } + var msg = make([]byte, 512) + var n int + if n, err = ws.Read(msg); err != nil { + log.Fatal(err) + } + fmt.Printf("Received: %s.\n", msg[:n]) +} diff --git a/vendor/golang.org/x/net/websocket/examplehandler_test.go b/vendor/golang.org/x/net/websocket/examplehandler_test.go new file mode 100644 index 000000000..f22a98fcd --- /dev/null +++ b/vendor/golang.org/x/net/websocket/examplehandler_test.go @@ -0,0 +1,26 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket_test + +import ( + "io" + "net/http" + + "golang.org/x/net/websocket" +) + +// Echo the data received on the WebSocket. +func EchoServer(ws *websocket.Conn) { + io.Copy(ws, ws) +} + +// This example demonstrates a trivial echo server. +func ExampleHandler() { + http.Handle("/echo", websocket.Handler(EchoServer)) + err := http.ListenAndServe(":12345", nil) + if err != nil { + panic("ListenAndServe: " + err.Error()) + } +} diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go new file mode 100644 index 000000000..8cffdd16c --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -0,0 +1,583 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +// This file implements a protocol of hybi draft. 
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + closeStatusNormal = 1000 + closeStatusGoingAway = 1001 + closeStatusProtocolError = 1002 + closeStatusUnsupportedData = 1003 + closeStatusFrameTooLarge = 1004 + closeStatusNoStatusRcvd = 1005 + closeStatusAbnormalClosure = 1006 + closeStatusBadMessageData = 1007 + closeStatusPolicyViolation = 1008 + closeStatusTooBigData = 1009 + closeStatusExtensionMismatch = 1010 + + maxControlFramePayloadLength = 125 +) + +var ( + ErrBadMaskingKey = &ProtocolError{"bad masking key"} + ErrBadPongMessage = &ProtocolError{"bad pong message"} + ErrBadClosingStatus = &ProtocolError{"bad closing status"} + ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"} + ErrNotImplemented = &ProtocolError{"not implemented"} + + handshakeHeader = map[string]bool{ + "Host": true, + "Upgrade": true, + "Connection": true, + "Sec-Websocket-Key": true, + "Sec-Websocket-Origin": true, + "Sec-Websocket-Version": true, + "Sec-Websocket-Protocol": true, + "Sec-Websocket-Accept": true, + } +) + +// A hybiFrameHeader is a frame header as defined in hybi draft. +type hybiFrameHeader struct { + Fin bool + Rsv [3]bool + OpCode byte + Length int64 + MaskingKey []byte + + data *bytes.Buffer +} + +// A hybiFrameReader is a reader for hybi frame. +type hybiFrameReader struct { + reader io.Reader + + header hybiFrameHeader + pos int64 + length int +} + +func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { + n, err = frame.reader.Read(msg) + if frame.header.MaskingKey != nil { + for i := 0; i < n; i++ { + msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] + frame.pos++ + } + } + return n, err +} + +func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode } + +func (frame *hybiFrameReader) HeaderReader() io.Reader { + if frame.header.data == nil { + return nil + } + if frame.header.data.Len() == 0 { + return nil + } + return frame.header.data +} + +func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil } + +func (frame *hybiFrameReader) Len() (n int) { return frame.length } + +// A hybiFrameReaderFactory creates new frame reader based on its frame type. +type hybiFrameReaderFactory struct { + *bufio.Reader +} + +// NewFrameReader reads a frame header from the connection, and creates new reader for the frame. +// See Section 5.2 Base Framing protocol for detail. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2 +func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) { + hybiFrame := new(hybiFrameReader) + frame = hybiFrame + var header []byte + var b byte + // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0 + for i := 0; i < 3; i++ { + j := uint(6 - i) + hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0 + } + hybiFrame.header.OpCode = header[0] & 0x0f + + // Second byte. Mask/Payload len(7bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + mask := (b & 0x80) != 0 + b &= 0x7f + lengthFields := 0 + switch { + case b <= 125: // Payload length 7bits. 
+ hybiFrame.header.Length = int64(b) + case b == 126: // Payload length 7+16bits + lengthFields = 2 + case b == 127: // Payload length 7+64bits + lengthFields = 8 + } + for i := 0; i < lengthFields; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits + b &= 0x7f + } + header = append(header, b) + hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) + } + if mask { + // Masking key. 4 bytes. + for i := 0; i < 4; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b) + } + } + hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length) + hybiFrame.header.data = bytes.NewBuffer(header) + hybiFrame.length = len(header) + int(hybiFrame.header.Length) + return +} + +// A HybiFrameWriter is a writer for hybi frame. +type hybiFrameWriter struct { + writer *bufio.Writer + + header *hybiFrameHeader +} + +func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) { + var header []byte + var b byte + if frame.header.Fin { + b |= 0x80 + } + for i := 0; i < 3; i++ { + if frame.header.Rsv[i] { + j := uint(6 - i) + b |= 1 << j + } + } + b |= frame.header.OpCode + header = append(header, b) + if frame.header.MaskingKey != nil { + b = 0x80 + } else { + b = 0 + } + lengthFields := 0 + length := len(msg) + switch { + case length <= 125: + b |= byte(length) + case length < 65536: + b |= 126 + lengthFields = 2 + default: + b |= 127 + lengthFields = 8 + } + header = append(header, b) + for i := 0; i < lengthFields; i++ { + j := uint((lengthFields - i - 1) * 8) + b = byte((length >> j) & 0xff) + header = append(header, b) + } + if frame.header.MaskingKey != nil { + if len(frame.header.MaskingKey) != 4 { + return 0, ErrBadMaskingKey + } + header = append(header, frame.header.MaskingKey...) + frame.writer.Write(header) + data := make([]byte, length) + for i := range data { + data[i] = msg[i] ^ frame.header.MaskingKey[i%4] + } + frame.writer.Write(data) + err = frame.writer.Flush() + return length, err + } + frame.writer.Write(header) + frame.writer.Write(msg) + err = frame.writer.Flush() + return length, err +} + +func (frame *hybiFrameWriter) Close() error { return nil } + +type hybiFrameWriterFactory struct { + *bufio.Writer + needMaskingKey bool +} + +func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType} + if buf.needMaskingKey { + frameHeader.MaskingKey, err = generateMaskingKey() + if err != nil { + return nil, err + } + } + return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil +} + +type hybiFrameHandler struct { + conn *Conn + payloadType byte +} + +func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) { + if handler.conn.IsServerConn() { + // The client MUST mask all frames sent to the server. + if frame.(*hybiFrameReader).header.MaskingKey == nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } else { + // The server MUST NOT mask all frames. 
+ if frame.(*hybiFrameReader).header.MaskingKey != nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } + if header := frame.HeaderReader(); header != nil { + io.Copy(ioutil.Discard, header) + } + switch frame.PayloadType() { + case ContinuationFrame: + frame.(*hybiFrameReader).header.OpCode = handler.payloadType + case TextFrame, BinaryFrame: + handler.payloadType = frame.PayloadType() + case CloseFrame: + return nil, io.EOF + case PingFrame, PongFrame: + b := make([]byte, maxControlFramePayloadLength) + n, err := io.ReadFull(frame, b) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return nil, err + } + io.Copy(ioutil.Discard, frame) + if frame.PayloadType() == PingFrame { + if _, err := handler.WritePong(b[:n]); err != nil { + return nil, err + } + } + return nil, nil + } + return frame, nil +} + +func (handler *hybiFrameHandler) WriteClose(status int) (err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame) + if err != nil { + return err + } + msg := make([]byte, 2) + binary.BigEndian.PutUint16(msg, uint16(status)) + _, err = w.Write(msg) + w.Close() + return err +} + +func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// newHybiConn creates a new WebSocket connection speaking hybi draft protocol. +func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + if buf == nil { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + buf = bufio.NewReadWriter(br, bw) + } + ws := &Conn{config: config, request: request, buf: buf, rwc: rwc, + frameReaderFactory: hybiFrameReaderFactory{buf.Reader}, + frameWriterFactory: hybiFrameWriterFactory{ + buf.Writer, request == nil}, + PayloadType: TextFrame, + defaultCloseStatus: closeStatusNormal} + ws.frameHandler = &hybiFrameHandler{conn: ws} + return ws +} + +// generateMaskingKey generates a masking key for a frame. +func generateMaskingKey() (maskingKey []byte, err error) { + maskingKey = make([]byte, 4) + if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil { + return + } + return +} + +// generateNonce generates a nonce consisting of a randomly selected 16-byte +// value that has been base64-encoded. +func generateNonce() (nonce []byte) { + key := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, key); err != nil { + panic(err) + } + nonce = make([]byte, 24) + base64.StdEncoding.Encode(nonce, key) + return +} + +// removeZone removes IPv6 zone identifer from host. +// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080" +func removeZone(host string) string { + if !strings.HasPrefix(host, "[") { + return host + } + i := strings.LastIndex(host, "]") + if i < 0 { + return host + } + j := strings.LastIndex(host[:i], "%") + if j < 0 { + return host + } + return host[:j] + host[i:] +} + +// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of +// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string. 
+func getNonceAccept(nonce []byte) (expected []byte, err error) { + h := sha1.New() + if _, err = h.Write(nonce); err != nil { + return + } + if _, err = h.Write([]byte(websocketGUID)); err != nil { + return + } + expected = make([]byte, 28) + base64.StdEncoding.Encode(expected, h.Sum(nil)) + return +} + +// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17 +func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) { + bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n") + + // According to RFC 6874, an HTTP client, proxy, or other + // intermediary must remove any IPv6 zone identifier attached + // to an outgoing URI. + bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n") + bw.WriteString("Upgrade: websocket\r\n") + bw.WriteString("Connection: Upgrade\r\n") + nonce := generateNonce() + if config.handshakeData != nil { + nonce = []byte(config.handshakeData["key"]) + } + bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n") + bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n") + + if config.Version != ProtocolVersionHybi13 { + return ErrBadProtocolVersion + } + + bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n") + if len(config.Protocol) > 0 { + bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + err = config.Header.WriteSubset(bw, handshakeHeader) + if err != nil { + return err + } + + bw.WriteString("\r\n") + if err = bw.Flush(); err != nil { + return err + } + + resp, err := http.ReadResponse(br, &http.Request{Method: "GET"}) + if err != nil { + return err + } + if resp.StatusCode != 101 { + return ErrBadStatus + } + if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" || + strings.ToLower(resp.Header.Get("Connection")) != "upgrade" { + return ErrBadUpgrade + } + expectedAccept, err := getNonceAccept(nonce) + if err != nil { + return err + } + if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) { + return ErrChallengeResponse + } + if resp.Header.Get("Sec-WebSocket-Extensions") != "" { + return ErrUnsupportedExtensions + } + offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol") + if offeredProtocol != "" { + protocolMatched := false + for i := 0; i < len(config.Protocol); i++ { + if config.Protocol[i] == offeredProtocol { + protocolMatched = true + break + } + } + if !protocolMatched { + return ErrBadWebSocketProtocol + } + config.Protocol = []string{offeredProtocol} + } + + return nil +} + +// newHybiClientConn creates a client WebSocket connection after handshake. +func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn { + return newHybiConn(config, buf, rwc, nil) +} + +// A HybiServerHandshaker performs a server handshake using hybi draft protocol. +type hybiServerHandshaker struct { + *Config + accept []byte +} + +func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) { + c.Version = ProtocolVersionHybi13 + if req.Method != "GET" { + return http.StatusMethodNotAllowed, ErrBadRequestMethod + } + // HTTP version can be safely ignored. 
+ + if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" || + !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") { + return http.StatusBadRequest, ErrNotWebSocket + } + + key := req.Header.Get("Sec-Websocket-Key") + if key == "" { + return http.StatusBadRequest, ErrChallengeResponse + } + version := req.Header.Get("Sec-Websocket-Version") + switch version { + case "13": + c.Version = ProtocolVersionHybi13 + default: + return http.StatusBadRequest, ErrBadWebSocketVersion + } + var scheme string + if req.TLS != nil { + scheme = "wss" + } else { + scheme = "ws" + } + c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI()) + if err != nil { + return http.StatusBadRequest, err + } + protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol")) + if protocol != "" { + protocols := strings.Split(protocol, ",") + for i := 0; i < len(protocols); i++ { + c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i])) + } + } + c.accept, err = getNonceAccept([]byte(key)) + if err != nil { + return http.StatusInternalServerError, err + } + return http.StatusSwitchingProtocols, nil +} + +// Origin parses the Origin header in req. +// If the Origin header is not set, it returns nil and nil. +func Origin(config *Config, req *http.Request) (*url.URL, error) { + var origin string + switch config.Version { + case ProtocolVersionHybi13: + origin = req.Header.Get("Origin") + } + if origin == "" { + return nil, nil + } + return url.ParseRequestURI(origin) +} + +func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) { + if len(c.Protocol) > 0 { + if len(c.Protocol) != 1 { + // You need choose a Protocol in Handshake func in Server. + return ErrBadWebSocketProtocol + } + } + buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n") + buf.WriteString("Upgrade: websocket\r\n") + buf.WriteString("Connection: Upgrade\r\n") + buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n") + if len(c.Protocol) > 0 { + buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + if c.Header != nil { + err := c.Header.WriteSubset(buf, handshakeHeader) + if err != nil { + return err + } + } + buf.WriteString("\r\n") + return buf.Flush() +} + +func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiServerConn(c.Config, buf, rwc, request) +} + +// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol. +func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiConn(config, buf, rwc, request) +} diff --git a/vendor/golang.org/x/net/websocket/hybi_test.go b/vendor/golang.org/x/net/websocket/hybi_test.go new file mode 100644 index 000000000..9504aa2d3 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi_test.go @@ -0,0 +1,608 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "testing" +) + +// Test the getNonceAccept function with values in +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 +func TestSecWebSocketAccept(t *testing.T) { + nonce := []byte("dGhlIHNhbXBsZSBub25jZQ==") + expected := []byte("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=") + accept, err := getNonceAccept(nonce) + if err != nil { + t.Errorf("getNonceAccept: returned error %v", err) + return + } + if !bytes.Equal(expected, accept) { + t.Errorf("getNonceAccept: expected %q got %q", expected, accept) + } +} + +func TestHybiClientHandshake(t *testing.T) { + type test struct { + url, host string + } + tests := []test{ + {"ws://server.example.com/chat", "server.example.com"}, + {"ws://127.0.0.1/chat", "127.0.0.1"}, + } + if _, err := url.ParseRequestURI("http://[fe80::1%25lo0]"); err == nil { + tests = append(tests, test{"ws://[fe80::1%25lo0]/chat", "[fe80::1]"}) + } + + for _, tt := range tests { + var b bytes.Buffer + bw := bufio.NewWriter(&b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +Sec-WebSocket-Protocol: chat + +`)) + var err error + var config Config + config.Location, err = url.ParseRequestURI(tt.url) + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + config.Version = ProtocolVersionHybi13 + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + if err := hybiClientHandshake(&config, br, bw); err != nil { + t.Fatal("handshake", err) + } + req, err := http.ReadRequest(bufio.NewReader(&b)) + if err != nil { + t.Fatal("read request", err) + } + if req.Method != "GET" { + t.Errorf("request method expected GET, but got %s", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected /chat, but got %s", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %s", req.Proto) + } + if req.Host != tt.host { + t.Errorf("request host expected %s, but got %s", tt.host, req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf("%s expected %s, but got %v", k, v, req.Header.Get(k)) + } + } + } +} + +func TestHybiClientHandshakeWithHeader(t *testing.T) { + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +Sec-WebSocket-Protocol: chat + +`)) + var err error + config := new(Config) + config.Location, err = url.ParseRequestURI("ws://server.example.com/chat") + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + 
config.Version = ProtocolVersionHybi13 + config.Header = http.Header(make(map[string][]string)) + config.Header.Add("User-Agent", "test") + + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + err = hybiClientHandshake(config, br, bw) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + req, err := http.ReadRequest(bufio.NewReader(b)) + if err != nil { + t.Fatalf("read request: %v", err) + } + if req.Method != "GET" { + t.Errorf("request method expected GET, but got %q", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected /chat, but got %q", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %q", req.Proto) + } + if req.Host != "server.example.com" { + t.Errorf("request Host expected server.example.com, but got %v", req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + "User-Agent": "test", + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf(fmt.Sprintf("%s expected %q but got %q", k, v, req.Header.Get(k))) + } + } +} + +func TestHybiServerHandshake(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + expectedProtocols := []string{"chat", "superchat"} + if fmt.Sprintf("%v", config.Protocol) != fmt.Sprintf("%v", expectedProtocols) { + t.Errorf("protocol expected %q but got %q", expectedProtocols, config.Protocol) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + config.Protocol = config.Protocol[:1] + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "Sec-WebSocket-Protocol: chat", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} + +func TestHybiServerHandshakeNoSubProtocol(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", 
http.StatusSwitchingProtocols, code) + } + if len(config.Protocol) != 0 { + t.Errorf("len(config.Protocol) expected 0, but got %q", len(config.Protocol)) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} + +func TestHybiServerHandshakeHybiBadVersion(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Sec-WebSocket-Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 9 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != ErrBadWebSocketVersion { + t.Errorf("handshake expected err %q but got %q", ErrBadWebSocketVersion, err) + } + if code != http.StatusBadRequest { + t.Errorf("status expected %q but got %q", http.StatusBadRequest, code) + } +} + +func testHybiFrame(t *testing.T, testHeader, testPayload, testMaskedPayload []byte, frameHeader *hybiFrameHeader) { + b := bytes.NewBuffer([]byte{}) + frameWriterFactory := &hybiFrameWriterFactory{bufio.NewWriter(b), false} + w, _ := frameWriterFactory.NewFrameWriter(TextFrame) + w.(*hybiFrameWriter).header = frameHeader + _, err := w.Write(testPayload) + w.Close() + if err != nil { + t.Errorf("Write error %q", err) + } + var expectedFrame []byte + expectedFrame = append(expectedFrame, testHeader...) + expectedFrame = append(expectedFrame, testMaskedPayload...) 
+ if !bytes.Equal(expectedFrame, b.Bytes()) { + t.Errorf("frame expected %q got %q", expectedFrame, b.Bytes()) + } + frameReaderFactory := &hybiFrameReaderFactory{bufio.NewReader(b)} + r, err := frameReaderFactory.NewFrameReader() + if err != nil { + t.Errorf("Read error %q", err) + } + if header := r.HeaderReader(); header == nil { + t.Errorf("no header") + } else { + actualHeader := make([]byte, r.Len()) + n, err := header.Read(actualHeader) + if err != nil { + t.Errorf("Read header error %q", err) + } else { + if n < len(testHeader) { + t.Errorf("header too short %q got %q", testHeader, actualHeader[:n]) + } + if !bytes.Equal(testHeader, actualHeader[:n]) { + t.Errorf("header expected %q got %q", testHeader, actualHeader[:n]) + } + } + } + if trailer := r.TrailerReader(); trailer != nil { + t.Errorf("unexpected trailer %q", trailer) + } + frame := r.(*hybiFrameReader) + if frameHeader.Fin != frame.header.Fin || + frameHeader.OpCode != frame.header.OpCode || + len(testPayload) != int(frame.header.Length) { + t.Errorf("mismatch %v (%d) vs %v", frameHeader, len(testPayload), frame) + } + payload := make([]byte, len(testPayload)) + _, err = r.Read(payload) + if err != nil && err != io.EOF { + t.Errorf("read %v", err) + } + if !bytes.Equal(testPayload, payload) { + t.Errorf("payload %q vs %q", testPayload, payload) + } +} + +func TestHybiShortTextFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} + payload := []byte("hello") + testHybiFrame(t, []byte{0x81, 0x05}, payload, payload, frameHeader) + + payload = make([]byte, 125) + testHybiFrame(t, []byte{0x81, 125}, payload, payload, frameHeader) +} + +func TestHybiShortMaskedTextFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame, + MaskingKey: []byte{0xcc, 0x55, 0x80, 0x20}} + payload := []byte("hello") + maskedPayload := []byte{0xa4, 0x30, 0xec, 0x4c, 0xa3} + header := []byte{0x81, 0x85} + header = append(header, frameHeader.MaskingKey...) 
+ testHybiFrame(t, header, payload, maskedPayload, frameHeader) +} + +func TestHybiShortBinaryFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: BinaryFrame} + payload := []byte("hello") + testHybiFrame(t, []byte{0x82, 0x05}, payload, payload, frameHeader) + + payload = make([]byte, 125) + testHybiFrame(t, []byte{0x82, 125}, payload, payload, frameHeader) +} + +func TestHybiControlFrame(t *testing.T) { + payload := []byte("hello") + + frameHeader := &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x05}, payload, payload, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x00}, nil, nil, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x05}, payload, payload, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x00}, nil, nil, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: CloseFrame} + payload = []byte{0x03, 0xe8} // 1000 + testHybiFrame(t, []byte{0x88, 0x02}, payload, payload, frameHeader) +} + +func TestHybiLongFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} + payload := make([]byte, 126) + testHybiFrame(t, []byte{0x81, 126, 0x00, 126}, payload, payload, frameHeader) + + payload = make([]byte, 65535) + testHybiFrame(t, []byte{0x81, 126, 0xff, 0xff}, payload, payload, frameHeader) + + payload = make([]byte, 65536) + testHybiFrame(t, []byte{0x81, 127, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00}, payload, payload, frameHeader) +} + +func TestHybiClientRead(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', + 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping + 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + msg := make([]byte, 512) + n, err := conn.Read(msg) + if err != nil { + t.Errorf("read 1st frame, error %q", err) + } + if n != 5 { + t.Errorf("read 1st frame, expect 5, got %d", n) + } + if !bytes.Equal(wireData[2:7], msg[:n]) { + t.Errorf("read 1st frame %v, got %v", wireData[2:7], msg[:n]) + } + n, err = conn.Read(msg) + if err != nil { + t.Errorf("read 2nd frame, error %q", err) + } + if n != 5 { + t.Errorf("read 2nd frame, expect 5, got %d", n) + } + if !bytes.Equal(wireData[16:21], msg[:n]) { + t.Errorf("read 2nd frame %v, got %v", wireData[16:21], msg[:n]) + } + n, err = conn.Read(msg) + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } +} + +func TestHybiShortRead(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', + 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping + 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + step := 0 + pos := 0 + expectedPos := []int{2, 5, 16, 19} + expectedLen := []int{3, 2, 3, 2} + for { + msg := make([]byte, 3) + n, err := conn.Read(msg) + if step >= len(expectedPos) { + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } + return + } + pos = expectedPos[step] + endPos := pos + expectedLen[step] + if err != nil { + t.Errorf("read from %d, got error %q", pos, err) + return + } + if 
n != endPos-pos { + t.Errorf("read from %d, expect %d, got %d", pos, endPos-pos, n) + } + if !bytes.Equal(wireData[pos:endPos], msg[:n]) { + t.Errorf("read from %d, frame %v, got %v", pos, wireData[pos:endPos], msg[:n]) + } + step++ + } +} + +func TestHybiServerRead(t *testing.T) { + wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello + 0x89, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // ping: hello + 0x81, 0x85, 0xed, 0x83, 0xb4, 0x24, + 0x9a, 0xec, 0xc6, 0x48, 0x89, // world + } + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) + + expected := [][]byte{[]byte("hello"), []byte("world")} + + msg := make([]byte, 512) + n, err := conn.Read(msg) + if err != nil { + t.Errorf("read 1st frame, error %q", err) + } + if n != 5 { + t.Errorf("read 1st frame, expect 5, got %d", n) + } + if !bytes.Equal(expected[0], msg[:n]) { + t.Errorf("read 1st frame %q, got %q", expected[0], msg[:n]) + } + + n, err = conn.Read(msg) + if err != nil { + t.Errorf("read 2nd frame, error %q", err) + } + if n != 5 { + t.Errorf("read 2nd frame, expect 5, got %d", n) + } + if !bytes.Equal(expected[1], msg[:n]) { + t.Errorf("read 2nd frame %q, got %q", expected[1], msg[:n]) + } + + n, err = conn.Read(msg) + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } +} + +func TestHybiServerReadWithoutMasking(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) + // server MUST close the connection upon receiving a non-masked frame. + msg := make([]byte, 512) + _, err := conn.Read(msg) + if err != io.EOF { + t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) + } +} + +func TestHybiClientReadWithMasking(t *testing.T) { + wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello + } + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + // client MUST close the connection upon receiving a masked frame. 
+ msg := make([]byte, 512) + _, err := conn.Read(msg) + if err != io.EOF { + t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) + } +} + +// Test the hybiServerHandshaker supports firefox implementation and +// checks Connection request header include (but it's not necessary +// equal to) "upgrade" +func TestHybiServerFirefoxHandshake(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: keep-alive, upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + config.Protocol = []string{"chat"} + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "Sec-WebSocket-Protocol: chat", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go new file mode 100644 index 000000000..0895dea19 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/server.go @@ -0,0 +1,113 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "fmt" + "io" + "net/http" +) + +func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) { + var hs serverHandshaker = &hybiServerHandshaker{Config: config} + code, err := hs.ReadHandshake(buf.Reader, req) + if err == ErrBadWebSocketVersion { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if err != nil { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if handshake != nil { + err = handshake(config, req) + if err != nil { + code = http.StatusForbidden + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + } + err = hs.AcceptHandshake(buf.Writer) + if err != nil { + code = http.StatusBadRequest + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + conn = hs.NewServerConn(buf, rwc, req) + return +} + +// Server represents a server of a WebSocket. +type Server struct { + // Config is a WebSocket configuration for new WebSocket connection. + Config + + // Handshake is an optional function in WebSocket handshake. 
+ // For example, you can check, or don't check Origin header. + // Another example, you can select config.Protocol. + Handshake func(*Config, *http.Request) error + + // Handler handles a WebSocket connection. + Handler +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.serveWebSocket(w, req) +} + +func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) { + rwc, buf, err := w.(http.Hijacker).Hijack() + if err != nil { + panic("Hijack failed: " + err.Error()) + } + // The server should abort the WebSocket connection if it finds + // the client did not send a handshake that matches with protocol + // specification. + defer rwc.Close() + conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake) + if err != nil { + return + } + if conn == nil { + panic("unexpected nil conn") + } + s.Handler(conn) +} + +// Handler is a simple interface to a WebSocket browser client. +// It checks if Origin header is valid URL by default. +// You might want to verify websocket.Conn.Config().Origin in the func. +// If you use Server instead of Handler, you could call websocket.Origin and +// check the origin in your Handshake func. So, if you want to accept +// non-browser clients, which do not send an Origin header, set a +// Server.Handshake that does not check the origin. +type Handler func(*Conn) + +func checkOrigin(config *Config, req *http.Request) (err error) { + config.Origin, err = Origin(config, req) + if err == nil && config.Origin == nil { + return fmt.Errorf("null origin") + } + return err +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s := Server{Handler: h, Handshake: checkOrigin} + s.serveWebSocket(w, req) +} diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go new file mode 100644 index 000000000..e242c89a7 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -0,0 +1,448 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements a client and server for the WebSocket protocol +// as specified in RFC 6455. +// +// This package currently lacks some features found in an alternative +// and more actively maintained WebSocket package: +// +// https://godoc.org/github.com/gorilla/websocket +// +package websocket // import "golang.org/x/net/websocket" + +import ( + "bufio" + "crypto/tls" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +const ( + ProtocolVersionHybi13 = 13 + ProtocolVersionHybi = ProtocolVersionHybi13 + SupportedProtocolVersion = "13" + + ContinuationFrame = 0 + TextFrame = 1 + BinaryFrame = 2 + CloseFrame = 8 + PingFrame = 9 + PongFrame = 10 + UnknownFrame = 255 + + DefaultMaxPayloadBytes = 32 << 20 // 32MB +) + +// ProtocolError represents WebSocket protocol errors. 
+type ProtocolError struct { + ErrorString string +} + +func (err *ProtocolError) Error() string { return err.ErrorString } + +var ( + ErrBadProtocolVersion = &ProtocolError{"bad protocol version"} + ErrBadScheme = &ProtocolError{"bad scheme"} + ErrBadStatus = &ProtocolError{"bad status"} + ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"} + ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"} + ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"} + ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"} + ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"} + ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"} + ErrBadFrame = &ProtocolError{"bad frame"} + ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"} + ErrNotWebSocket = &ProtocolError{"not websocket protocol"} + ErrBadRequestMethod = &ProtocolError{"bad method"} + ErrNotSupported = &ProtocolError{"not supported"} +) + +// ErrFrameTooLarge is returned by Codec's Receive method if payload size +// exceeds limit set by Conn.MaxPayloadBytes +var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit") + +// Addr is an implementation of net.Addr for WebSocket. +type Addr struct { + *url.URL +} + +// Network returns the network type for a WebSocket, "websocket". +func (addr *Addr) Network() string { return "websocket" } + +// Config is a WebSocket configuration +type Config struct { + // A WebSocket server address. + Location *url.URL + + // A Websocket client origin. + Origin *url.URL + + // WebSocket subprotocols. + Protocol []string + + // WebSocket protocol version. + Version int + + // TLS config for secure WebSocket (wss). + TlsConfig *tls.Config + + // Additional header fields to be sent in WebSocket opening handshake. + Header http.Header + + // Dialer used when opening websocket connections. + Dialer *net.Dialer + + handshakeData map[string]string +} + +// serverHandshaker is an interface to handle WebSocket server side handshake. +type serverHandshaker interface { + // ReadHandshake reads handshake request message from client. + // Returns http response code and error if any. + ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) + + // AcceptHandshake accepts the client handshake request and sends + // handshake response back to client. + AcceptHandshake(buf *bufio.Writer) (err error) + + // NewServerConn creates a new WebSocket connection. + NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn) +} + +// frameReader is an interface to read a WebSocket frame. +type frameReader interface { + // Reader is to read payload of the frame. + io.Reader + + // PayloadType returns payload type. + PayloadType() byte + + // HeaderReader returns a reader to read header of the frame. + HeaderReader() io.Reader + + // TrailerReader returns a reader to read trailer of the frame. + // If it returns nil, there is no trailer in the frame. + TrailerReader() io.Reader + + // Len returns total length of the frame, including header and trailer. + Len() int +} + +// frameReaderFactory is an interface to creates new frame reader. +type frameReaderFactory interface { + NewFrameReader() (r frameReader, err error) +} + +// frameWriter is an interface to write a WebSocket frame. +type frameWriter interface { + // Writer is to write payload of the frame. + io.WriteCloser +} + +// frameWriterFactory is an interface to create new frame writer. 
+type frameWriterFactory interface { + NewFrameWriter(payloadType byte) (w frameWriter, err error) +} + +type frameHandler interface { + HandleFrame(frame frameReader) (r frameReader, err error) + WriteClose(status int) (err error) +} + +// Conn represents a WebSocket connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. +type Conn struct { + config *Config + request *http.Request + + buf *bufio.ReadWriter + rwc io.ReadWriteCloser + + rio sync.Mutex + frameReaderFactory + frameReader + + wio sync.Mutex + frameWriterFactory + + frameHandler + PayloadType byte + defaultCloseStatus int + + // MaxPayloadBytes limits the size of frame payload received over Conn + // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used. + MaxPayloadBytes int +} + +// Read implements the io.Reader interface: +// it reads data of a frame from the WebSocket connection. +// if msg is not large enough for the frame data, it fills the msg and next Read +// will read the rest of the frame data. +// it reads Text frame or Binary frame. +func (ws *Conn) Read(msg []byte) (n int, err error) { + ws.rio.Lock() + defer ws.rio.Unlock() +again: + if ws.frameReader == nil { + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return 0, err + } + ws.frameReader, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return 0, err + } + if ws.frameReader == nil { + goto again + } + } + n, err = ws.frameReader.Read(msg) + if err == io.EOF { + if trailer := ws.frameReader.TrailerReader(); trailer != nil { + io.Copy(ioutil.Discard, trailer) + } + ws.frameReader = nil + goto again + } + return n, err +} + +// Write implements the io.Writer interface: +// it writes data as a frame to the WebSocket connection. +func (ws *Conn) Write(msg []byte) (n int, err error) { + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// Close implements the io.Closer interface. +func (ws *Conn) Close() error { + err := ws.frameHandler.WriteClose(ws.defaultCloseStatus) + err1 := ws.rwc.Close() + if err != nil { + return err + } + return err1 +} + +func (ws *Conn) IsClientConn() bool { return ws.request == nil } +func (ws *Conn) IsServerConn() bool { return ws.request != nil } + +// LocalAddr returns the WebSocket Origin for the connection for client, or +// the WebSocket location for server. +func (ws *Conn) LocalAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Origin} + } + return &Addr{ws.config.Location} +} + +// RemoteAddr returns the WebSocket location for the connection for client, or +// the Websocket Origin for server. +func (ws *Conn) RemoteAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Location} + } + return &Addr{ws.config.Origin} +} + +var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn") + +// SetDeadline sets the connection's network read & write deadlines. +func (ws *Conn) SetDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetDeadline(t) + } + return errSetDeadline +} + +// SetReadDeadline sets the connection's network read deadline. +func (ws *Conn) SetReadDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetReadDeadline(t) + } + return errSetDeadline +} + +// SetWriteDeadline sets the connection's network write deadline. 
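A Conn reads and writes whole frames: each Write sends one frame, a Read given a short buffer returns part of a frame and leaves the rest for the next Read, and MaxPayloadBytes only bounds what Codec's Receive will accept (oversized frames make it return ErrFrameTooLarge). A minimal client sketch tying these together, assuming a hypothetical ws://localhost:8080/ws endpoint:

	package main

	import (
		"log"
		"time"

		"golang.org/x/net/websocket"
	)

	func main() {
		// "" requests no subprotocol; the endpoint and origin are placeholders.
		ws, err := websocket.Dial("ws://localhost:8080/ws", "", "http://localhost/")
		if err != nil {
			log.Fatal(err)
		}
		defer ws.Close()

		ws.MaxPayloadBytes = 1 << 20                         // Codec.Receive rejects frames above 1MB
		_ = ws.SetDeadline(time.Now().Add(10 * time.Second)) // forwarded to the underlying net.Conn

		if _, err := ws.Write([]byte("ping")); err != nil { // one Write, one frame
			log.Fatal(err)
		}
		buf := make([]byte, 512)
		n, err := ws.Read(buf) // may return only part of a frame; later Reads return the rest
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("got %q", buf[:n])
	}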
+func (ws *Conn) SetWriteDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetWriteDeadline(t) + } + return errSetDeadline +} + +// Config returns the WebSocket config. +func (ws *Conn) Config() *Config { return ws.config } + +// Request returns the http request upgraded to the WebSocket. +// It is nil for client side. +func (ws *Conn) Request() *http.Request { return ws.request } + +// Codec represents a symmetric pair of functions that implement a codec. +type Codec struct { + Marshal func(v interface{}) (data []byte, payloadType byte, err error) + Unmarshal func(data []byte, payloadType byte, v interface{}) (err error) +} + +// Send sends v marshaled by cd.Marshal as single frame to ws. +func (cd Codec) Send(ws *Conn, v interface{}) (err error) { + data, payloadType, err := cd.Marshal(v) + if err != nil { + return err + } + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(payloadType) + if err != nil { + return err + } + _, err = w.Write(data) + w.Close() + return err +} + +// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores +// in v. The whole frame payload is read to an in-memory buffer; max size of +// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds +// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire +// completely. The next call to Receive would read and discard leftover data of +// previous oversized frame before processing next frame. +func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { + ws.rio.Lock() + defer ws.rio.Unlock() + if ws.frameReader != nil { + _, err = io.Copy(ioutil.Discard, ws.frameReader) + if err != nil { + return err + } + ws.frameReader = nil + } +again: + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return err + } + frame, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return err + } + if frame == nil { + goto again + } + maxPayloadBytes := ws.MaxPayloadBytes + if maxPayloadBytes == 0 { + maxPayloadBytes = DefaultMaxPayloadBytes + } + if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) { + // payload size exceeds limit, no need to call Unmarshal + // + // set frameReader to current oversized frame so that + // the next call to this function can drain leftover + // data before processing the next frame + ws.frameReader = frame + return ErrFrameTooLarge + } + payloadType := frame.PayloadType() + data, err := ioutil.ReadAll(frame) + if err != nil { + return err + } + return cd.Unmarshal(data, payloadType, v) +} + +func marshal(v interface{}) (msg []byte, payloadType byte, err error) { + switch data := v.(type) { + case string: + return []byte(data), TextFrame, nil + case []byte: + return data, BinaryFrame, nil + } + return nil, UnknownFrame, ErrNotSupported +} + +func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + switch data := v.(type) { + case *string: + *data = string(msg) + return nil + case *[]byte: + *data = msg + return nil + } + return ErrNotSupported +} + +/* +Message is a codec to send/receive text/binary data in a frame on WebSocket connection. +To send/receive text frame, use string type. +To send/receive binary frame, use []byte type. 
+ +Trivial usage: + + import "websocket" + + // receive text frame + var message string + websocket.Message.Receive(ws, &message) + + // send text frame + message = "hello" + websocket.Message.Send(ws, message) + + // receive binary frame + var data []byte + websocket.Message.Receive(ws, &data) + + // send binary frame + data = []byte{0, 1, 2} + websocket.Message.Send(ws, data) + +*/ +var Message = Codec{marshal, unmarshal} + +func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) { + msg, err = json.Marshal(v) + return msg, TextFrame, err +} + +func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + return json.Unmarshal(msg, v) +} + +/* +JSON is a codec to send/receive JSON data in a frame from a WebSocket connection. + +Trivial usage: + + import "websocket" + + type T struct { + Msg string + Count int + } + + // receive JSON type T + var data T + websocket.JSON.Receive(ws, &data) + + // send JSON type T + websocket.JSON.Send(ws, data) +*/ +var JSON = Codec{jsonMarshal, jsonUnmarshal} diff --git a/vendor/golang.org/x/net/websocket/websocket_test.go b/vendor/golang.org/x/net/websocket/websocket_test.go new file mode 100644 index 000000000..2054ce85a --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket_test.go @@ -0,0 +1,665 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "log" + "net" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +var serverAddr string +var once sync.Once + +func echoServer(ws *Conn) { + defer ws.Close() + io.Copy(ws, ws) +} + +type Count struct { + S string + N int +} + +func countServer(ws *Conn) { + defer ws.Close() + for { + var count Count + err := JSON.Receive(ws, &count) + if err != nil { + return + } + count.N++ + count.S = strings.Repeat(count.S, count.N) + err = JSON.Send(ws, count) + if err != nil { + return + } + } +} + +type testCtrlAndDataHandler struct { + hybiFrameHandler +} + +func (h *testCtrlAndDataHandler) WritePing(b []byte) (int, error) { + h.hybiFrameHandler.conn.wio.Lock() + defer h.hybiFrameHandler.conn.wio.Unlock() + w, err := h.hybiFrameHandler.conn.frameWriterFactory.NewFrameWriter(PingFrame) + if err != nil { + return 0, err + } + n, err := w.Write(b) + w.Close() + return n, err +} + +func ctrlAndDataServer(ws *Conn) { + defer ws.Close() + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + go func() { + for i := 0; ; i++ { + var b []byte + if i%2 != 0 { // with or without payload + b = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-SERVER", i)) + } + if _, err := h.WritePing(b); err != nil { + break + } + if _, err := h.WritePong(b); err != nil { // unsolicited pong + break + } + time.Sleep(10 * time.Millisecond) + } + }() + + b := make([]byte, 128) + for { + n, err := ws.Read(b) + if err != nil { + break + } + if _, err := ws.Write(b[:n]); err != nil { + break + } + } +} + +func subProtocolHandshake(config *Config, req *http.Request) error { + for _, proto := range config.Protocol { + if proto == "chat" { + config.Protocol = []string{proto} + return nil + } + } + return ErrBadWebSocketProtocol +} + +func subProtoServer(ws *Conn) { + for _, proto := range ws.Config().Protocol { + io.WriteString(ws, proto) + } +} + +func startServer() { + http.Handle("/echo", Handler(echoServer)) + 
http.Handle("/count", Handler(countServer)) + http.Handle("/ctrldata", Handler(ctrlAndDataServer)) + subproto := Server{ + Handshake: subProtocolHandshake, + Handler: Handler(subProtoServer), + } + http.Handle("/subproto", subproto) + server := httptest.NewServer(nil) + serverAddr = server.Listener.Addr().String() + log.Print("Test WebSocket server listening on ", serverAddr) +} + +func newConfig(t *testing.T, path string) *Config { + config, _ := NewConfig(fmt.Sprintf("ws://%s%s", serverAddr, path), "http://localhost") + return config +} + +func TestEcho(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + msg := []byte("hello, world\n") + if _, err := conn.Write(msg); err != nil { + t.Errorf("Write: %v", err) + } + var actual_msg = make([]byte, 512) + n, err := conn.Read(actual_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + actual_msg = actual_msg[0:n] + if !bytes.Equal(msg, actual_msg) { + t.Errorf("Echo: expected %q got %q", msg, actual_msg) + } + conn.Close() +} + +func TestAddr(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + ra := conn.RemoteAddr().String() + if !strings.HasPrefix(ra, "ws://") || !strings.HasSuffix(ra, "/echo") { + t.Errorf("Bad remote addr: %v", ra) + } + la := conn.LocalAddr().String() + if !strings.HasPrefix(la, "http://") { + t.Errorf("Bad local addr: %v", la) + } + conn.Close() +} + +func TestCount(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/count"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + var count Count + count.S = "hello" + if err := JSON.Send(conn, count); err != nil { + t.Errorf("Write: %v", err) + } + if err := JSON.Receive(conn, &count); err != nil { + t.Errorf("Read: %v", err) + } + if count.N != 1 { + t.Errorf("count: expected %d got %d", 1, count.N) + } + if count.S != "hello" { + t.Errorf("count: expected %q got %q", "hello", count.S) + } + if err := JSON.Send(conn, count); err != nil { + t.Errorf("Write: %v", err) + } + if err := JSON.Receive(conn, &count); err != nil { + t.Errorf("Read: %v", err) + } + if count.N != 2 { + t.Errorf("count: expected %d got %d", 2, count.N) + } + if count.S != "hellohello" { + t.Errorf("count: expected %q got %q", "hellohello", count.S) + } + conn.Close() +} + +func TestWithQuery(t *testing.T) { + once.Do(startServer) + + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + config := newConfig(t, "/echo") + config.Location, err = url.ParseRequestURI(fmt.Sprintf("ws://%s/echo?q=v", serverAddr)) + if err != nil { + t.Fatal("location url", err) + } + + ws, err := NewClient(config, client) + if err != nil { + t.Errorf("WebSocket handshake: %v", err) + return + } + ws.Close() +} + +func testWithProtocol(t *testing.T, subproto []string) (string, error) { + once.Do(startServer) + + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + config := 
newConfig(t, "/subproto") + config.Protocol = subproto + + ws, err := NewClient(config, client) + if err != nil { + return "", err + } + msg := make([]byte, 16) + n, err := ws.Read(msg) + if err != nil { + return "", err + } + ws.Close() + return string(msg[:n]), nil +} + +func TestWithProtocol(t *testing.T) { + proto, err := testWithProtocol(t, []string{"chat"}) + if err != nil { + t.Errorf("SubProto: unexpected error: %v", err) + } + if proto != "chat" { + t.Errorf("SubProto: expected %q, got %q", "chat", proto) + } +} + +func TestWithTwoProtocol(t *testing.T) { + proto, err := testWithProtocol(t, []string{"test", "chat"}) + if err != nil { + t.Errorf("SubProto: unexpected error: %v", err) + } + if proto != "chat" { + t.Errorf("SubProto: expected %q, got %q", "chat", proto) + } +} + +func TestWithBadProtocol(t *testing.T) { + _, err := testWithProtocol(t, []string{"test"}) + if err != ErrBadStatus { + t.Errorf("SubProto: expected %v, got %v", ErrBadStatus, err) + } +} + +func TestHTTP(t *testing.T) { + once.Do(startServer) + + // If the client did not send a handshake that matches the protocol + // specification, the server MUST return an HTTP response with an + // appropriate error code (such as 400 Bad Request) + resp, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr)) + if err != nil { + t.Errorf("Get: error %#v", err) + return + } + if resp == nil { + t.Error("Get: resp is null") + return + } + if resp.StatusCode != http.StatusBadRequest { + t.Errorf("Get: expected %q got %q", http.StatusBadRequest, resp.StatusCode) + } +} + +func TestTrailingSpaces(t *testing.T) { + // http://code.google.com/p/go/issues/detail?id=955 + // The last runs of this create keys with trailing spaces that should not be + // generated by the client. + once.Do(startServer) + config := newConfig(t, "/echo") + for i := 0; i < 30; i++ { + // body + ws, err := DialConfig(config) + if err != nil { + t.Errorf("Dial #%d failed: %v", i, err) + break + } + ws.Close() + } +} + +func TestDialConfigBadVersion(t *testing.T) { + once.Do(startServer) + config := newConfig(t, "/echo") + config.Version = 1234 + + _, err := DialConfig(config) + + if dialerr, ok := err.(*DialError); ok { + if dialerr.Err != ErrBadProtocolVersion { + t.Errorf("dial expected err %q but got %q", ErrBadProtocolVersion, dialerr.Err) + } + } +} + +func TestDialConfigWithDialer(t *testing.T) { + once.Do(startServer) + config := newConfig(t, "/echo") + config.Dialer = &net.Dialer{ + Deadline: time.Now().Add(-time.Minute), + } + _, err := DialConfig(config) + dialerr, ok := err.(*DialError) + if !ok { + t.Fatalf("DialError expected, got %#v", err) + } + neterr, ok := dialerr.Err.(*net.OpError) + if !ok { + t.Fatalf("net.OpError error expected, got %#v", dialerr.Err) + } + if !neterr.Timeout() { + t.Fatalf("expected timeout error, got %#v", neterr) + } +} + +func TestSmallBuffer(t *testing.T) { + // http://code.google.com/p/go/issues/detail?id=1145 + // Read should be able to handle reading a fragment of a frame. 
+ once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + msg := []byte("hello, world\n") + if _, err := conn.Write(msg); err != nil { + t.Errorf("Write: %v", err) + } + var small_msg = make([]byte, 8) + n, err := conn.Read(small_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + if !bytes.Equal(msg[:len(small_msg)], small_msg) { + t.Errorf("Echo: expected %q got %q", msg[:len(small_msg)], small_msg) + } + var second_msg = make([]byte, len(msg)) + n, err = conn.Read(second_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + second_msg = second_msg[0:n] + if !bytes.Equal(msg[len(small_msg):], second_msg) { + t.Errorf("Echo: expected %q got %q", msg[len(small_msg):], second_msg) + } + conn.Close() +} + +var parseAuthorityTests = []struct { + in *url.URL + out string +}{ + { + &url.URL{ + Scheme: "ws", + Host: "www.google.com", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "wss", + Host: "www.google.com", + }, + "www.google.com:443", + }, + { + &url.URL{ + Scheme: "ws", + Host: "www.google.com:80", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "wss", + Host: "www.google.com:443", + }, + "www.google.com:443", + }, + // some invalid ones for parseAuthority. parseAuthority doesn't + // concern itself with the scheme unless it actually knows about it + { + &url.URL{ + Scheme: "http", + Host: "www.google.com", + }, + "www.google.com", + }, + { + &url.URL{ + Scheme: "http", + Host: "www.google.com:80", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "asdf", + Host: "127.0.0.1", + }, + "127.0.0.1", + }, + { + &url.URL{ + Scheme: "asdf", + Host: "www.google.com", + }, + "www.google.com", + }, +} + +func TestParseAuthority(t *testing.T) { + for _, tt := range parseAuthorityTests { + out := parseAuthority(tt.in) + if out != tt.out { + t.Errorf("got %v; want %v", out, tt.out) + } + } +} + +type closerConn struct { + net.Conn + closed int // count of the number of times Close was called +} + +func (c *closerConn) Close() error { + c.closed++ + return c.Conn.Close() +} + +func TestClose(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/11454") + } + + once.Do(startServer) + + conn, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + cc := closerConn{Conn: conn} + + client, err := NewClient(newConfig(t, "/echo"), &cc) + if err != nil { + t.Fatalf("WebSocket handshake: %v", err) + } + + // set the deadline to ten minutes ago, which will have expired by the time + // client.Close sends the close status frame. 
+ conn.SetDeadline(time.Now().Add(-10 * time.Minute)) + + if err := client.Close(); err == nil { + t.Errorf("ws.Close(): expected error, got %v", err) + } + if cc.closed < 1 { + t.Fatalf("ws.Close(): expected underlying ws.rwc.Close to be called > 0 times, got: %v", cc.closed) + } +} + +var originTests = []struct { + req *http.Request + origin *url.URL +}{ + { + req: &http.Request{ + Header: http.Header{ + "Origin": []string{"http://www.example.com"}, + }, + }, + origin: &url.URL{ + Scheme: "http", + Host: "www.example.com", + }, + }, + { + req: &http.Request{}, + }, +} + +func TestOrigin(t *testing.T) { + conf := newConfig(t, "/echo") + conf.Version = ProtocolVersionHybi13 + for i, tt := range originTests { + origin, err := Origin(conf, tt.req) + if err != nil { + t.Error(err) + continue + } + if !reflect.DeepEqual(origin, tt.origin) { + t.Errorf("#%d: got origin %v; want %v", i, origin, tt.origin) + continue + } + } +} + +func TestCtrlAndData(t *testing.T) { + once.Do(startServer) + + c, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal(err) + } + ws, err := NewClient(newConfig(t, "/ctrldata"), c) + if err != nil { + t.Fatal(err) + } + defer ws.Close() + + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + b := make([]byte, 128) + for i := 0; i < 2; i++ { + data := []byte(fmt.Sprintf("#%d-DATA-FRAME-FROM-CLIENT", i)) + if _, err := ws.Write(data); err != nil { + t.Fatalf("#%d: %v", i, err) + } + var ctrl []byte + if i%2 != 0 { // with or without payload + ctrl = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-CLIENT", i)) + } + if _, err := h.WritePing(ctrl); err != nil { + t.Fatalf("#%d: %v", i, err) + } + n, err := ws.Read(b) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + if !bytes.Equal(b[:n], data) { + t.Fatalf("#%d: got %v; want %v", i, b[:n], data) + } + } +} + +func TestCodec_ReceiveLimited(t *testing.T) { + const limit = 2048 + var payloads [][]byte + for _, size := range []int{ + 1024, + 2048, + 4096, // receive of this message would be interrupted due to limit + 2048, // this one is to make sure next receive recovers discarding leftovers + } { + b := make([]byte, size) + rand.Read(b) + payloads = append(payloads, b) + } + handlerDone := make(chan struct{}) + limitedHandler := func(ws *Conn) { + defer close(handlerDone) + ws.MaxPayloadBytes = limit + defer ws.Close() + for i, p := range payloads { + t.Logf("payload #%d (size %d, exceeds limit: %v)", i, len(p), len(p) > limit) + var recv []byte + err := Message.Receive(ws, &recv) + switch err { + case nil: + case ErrFrameTooLarge: + if len(p) <= limit { + t.Fatalf("unexpected frame size limit: expected %d bytes of payload having limit at %d", len(p), limit) + } + continue + default: + t.Fatalf("unexpected error: %v (want either nil or ErrFrameTooLarge)", err) + } + if len(recv) > limit { + t.Fatalf("received %d bytes of payload having limit at %d", len(recv), limit) + } + if !bytes.Equal(p, recv) { + t.Fatalf("received payload differs:\ngot:\t%v\nwant:\t%v", recv, p) + } + } + } + server := httptest.NewServer(Handler(limitedHandler)) + defer server.CloseClientConnections() + defer server.Close() + addr := server.Listener.Addr().String() + ws, err := Dial("ws://"+addr+"/", "", "http://localhost/") + if err != nil { + t.Fatal(err) + } + defer ws.Close() + for i, p := range payloads { + if err := Message.Send(ws, p); err != nil { + t.Fatalf("payload #%d (size %d): %v", i, len(p), err) + } + } + <-handlerDone +} diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf.go 
b/vendor/golang.org/x/net/xsrftoken/xsrf.go new file mode 100644 index 000000000..bc861e1f3 --- /dev/null +++ b/vendor/golang.org/x/net/xsrftoken/xsrf.go @@ -0,0 +1,94 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xsrftoken provides methods for generating and validating secure XSRF tokens. +package xsrftoken // import "golang.org/x/net/xsrftoken" + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "strconv" + "strings" + "time" +) + +// Timeout is the duration for which XSRF tokens are valid. +// It is exported so clients may set cookie timeouts that match generated tokens. +const Timeout = 24 * time.Hour + +// clean sanitizes a string for inclusion in a token by replacing all ":"s. +func clean(s string) string { + return strings.Replace(s, ":", "_", -1) +} + +// Generate returns a URL-safe secure XSRF token that expires in 24 hours. +// +// key is a secret key for your application; it must be non-empty. +// userID is an optional unique identifier for the user. +// actionID is an optional action the user is taking (e.g. POSTing to a particular path). +func Generate(key, userID, actionID string) string { + return generateTokenAtTime(key, userID, actionID, time.Now()) +} + +// generateTokenAtTime is like Generate, but returns a token that expires 24 hours from now. +func generateTokenAtTime(key, userID, actionID string, now time.Time) string { + if len(key) == 0 { + panic("zero length xsrf secret key") + } + // Round time up and convert to milliseconds. + milliTime := (now.UnixNano() + 1e6 - 1) / 1e6 + + h := hmac.New(sha1.New, []byte(key)) + fmt.Fprintf(h, "%s:%s:%d", clean(userID), clean(actionID), milliTime) + + // Get the padded base64 string then removing the padding. + tok := string(h.Sum(nil)) + tok = base64.URLEncoding.EncodeToString([]byte(tok)) + tok = strings.TrimRight(tok, "=") + + return fmt.Sprintf("%s:%d", tok, milliTime) +} + +// Valid reports whether a token is a valid, unexpired token returned by Generate. +func Valid(token, key, userID, actionID string) bool { + return validTokenAtTime(token, key, userID, actionID, time.Now()) +} + +// validTokenAtTime reports whether a token is valid at the given time. +func validTokenAtTime(token, key, userID, actionID string, now time.Time) bool { + if len(key) == 0 { + panic("zero length xsrf secret key") + } + // Extract the issue time of the token. + sep := strings.LastIndex(token, ":") + if sep < 0 { + return false + } + millis, err := strconv.ParseInt(token[sep+1:], 10, 64) + if err != nil { + return false + } + issueTime := time.Unix(0, millis*1e6) + + // Check that the token is not expired. + if now.Sub(issueTime) >= Timeout { + return false + } + + // Check that the token is not from the future. + // Allow 1 minute grace period in case the token is being verified on a + // machine whose clock is behind the machine that issued the token. + if issueTime.After(now.Add(1 * time.Minute)) { + return false + } + + expected := generateTokenAtTime(key, userID, actionID, issueTime) + + // Check that the token matches the expected value. + // Use constant time comparison to avoid timing attacks. 
+ return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1 +} diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf_test.go b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go new file mode 100644 index 000000000..6c8e7d9b5 --- /dev/null +++ b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go @@ -0,0 +1,83 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xsrftoken + +import ( + "encoding/base64" + "testing" + "time" +) + +const ( + key = "quay" + userID = "12345678" + actionID = "POST /form" +) + +var ( + now = time.Now() + oneMinuteFromNow = now.Add(1 * time.Minute) +) + +func TestValidToken(t *testing.T) { + tok := generateTokenAtTime(key, userID, actionID, now) + if !validTokenAtTime(tok, key, userID, actionID, oneMinuteFromNow) { + t.Error("One second later: Expected token to be valid") + } + if !validTokenAtTime(tok, key, userID, actionID, now.Add(Timeout-1*time.Nanosecond)) { + t.Error("Just before timeout: Expected token to be valid") + } + if !validTokenAtTime(tok, key, userID, actionID, now.Add(-1*time.Minute+1*time.Millisecond)) { + t.Error("One minute in the past: Expected token to be valid") + } +} + +// TestSeparatorReplacement tests that separators are being correctly substituted +func TestSeparatorReplacement(t *testing.T) { + tok := generateTokenAtTime("foo:bar", "baz", "wah", now) + tok2 := generateTokenAtTime("foo", "bar:baz", "wah", now) + if tok == tok2 { + t.Errorf("Expected generated tokens to be different") + } +} + +func TestInvalidToken(t *testing.T) { + invalidTokenTests := []struct { + name, key, userID, actionID string + t time.Time + }{ + {"Bad key", "foobar", userID, actionID, oneMinuteFromNow}, + {"Bad userID", key, "foobar", actionID, oneMinuteFromNow}, + {"Bad actionID", key, userID, "foobar", oneMinuteFromNow}, + {"Expired", key, userID, actionID, now.Add(Timeout + 1*time.Millisecond)}, + {"More than 1 minute from the future", key, userID, actionID, now.Add(-1*time.Nanosecond - 1*time.Minute)}, + } + + tok := generateTokenAtTime(key, userID, actionID, now) + for _, itt := range invalidTokenTests { + if validTokenAtTime(tok, itt.key, itt.userID, itt.actionID, itt.t) { + t.Errorf("%v: Expected token to be invalid", itt.name) + } + } +} + +// TestValidateBadData primarily tests that no unexpected panics are triggered +// during parsing +func TestValidateBadData(t *testing.T) { + badDataTests := []struct { + name, tok string + }{ + {"Invalid Base64", "ASDab24(@)$*=="}, + {"No delimiter", base64.URLEncoding.EncodeToString([]byte("foobar12345678"))}, + {"Invalid time", base64.URLEncoding.EncodeToString([]byte("foobar:foobar"))}, + {"Wrong length", "1234" + generateTokenAtTime(key, userID, actionID, now)}, + } + + for _, bdt := range badDataTests { + if validTokenAtTime(bdt.tok, key, userID, actionID, oneMinuteFromNow) { + t.Errorf("%v: Expected token to be invalid", bdt.name) + } + } +} diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. 
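Putting the two exported entry points of the xsrftoken package above together: a caller generates a token with Generate and later checks it with Valid using the same key, userID and actionID, and tokens expire after Timeout (24 hours). A minimal sketch with a placeholder key and identifiers:

	package main

	import (
		"fmt"

		"golang.org/x/net/xsrftoken"
	)

	func main() {
		const key = "app-secret-key" // placeholder application secret

		// Embed the token in the form or header sent to the user.
		token := xsrftoken.Generate(key, "user-42", "POST /form")

		// On submission, validate with the same key, userID and actionID.
		if xsrftoken.Valid(token, key, "user-42", "POST /form") {
			fmt.Println("token accepted")
		} else {
			fmt.Println("token rejected")
		}
	}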
diff --git a/vendor/golang.org/x/sync/CONTRIBUTING.md b/vendor/golang.org/x/sync/CONTRIBUTING.md new file mode 100644 index 000000000..d0485e887 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/README.md b/vendor/golang.org/x/sync/README.md new file mode 100644 index 000000000..1f8436cc9 --- /dev/null +++ b/vendor/golang.org/x/sync/README.md @@ -0,0 +1,18 @@ +# Go Sync + +This repository provides Go concurrency primitives in addition to the +ones provided by the language and "sync" and "sync/atomic" packages. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/sync`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/sync`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the sync repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/sync:" in the +subject line, so it is easy to find. diff --git a/vendor/golang.org/x/sync/codereview.cfg b/vendor/golang.org/x/sync/codereview.cfg new file mode 100644 index 000000000..3f8b14b64 --- /dev/null +++ b/vendor/golang.org/x/sync/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 000000000..533438d91 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,67 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +package errgroup + +import ( + "sync" + + "golang.org/x/net/context" +) + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid and does not cancel on error. 
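As the comment above notes, the zero value of Group is usable directly when no cancellation is wanted; Wait then simply returns the first non-nil error from the functions passed to Go. The package's own examples further down in this diff show fuller patterns; a compressed sketch of the zero-value case, with placeholder work:

	package main

	import (
		"fmt"

		"golang.org/x/sync/errgroup"
	)

	func main() {
		var g errgroup.Group // zero value: no Context, no cancellation on error

		for _, name := range []string{"alpha", "beta", "gamma"} {
			name := name // capture the loop variable for the closure
			g.Go(func() error {
				fmt.Println("processing", name) // placeholder for real work
				return nil
			})
		}
		// Wait blocks for all goroutines and reports the first error, if any.
		if err := g.Wait(); err != nil {
			fmt.Println("error:", err)
		}
	}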
+type Group struct { + cancel func() + + wg sync.WaitGroup + + errOnce sync.Once + err error +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancel(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel() + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to return a non-nil error cancels the group; its error will be +// returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup_example_md5all_test.go b/vendor/golang.org/x/sync/errgroup/errgroup_example_md5all_test.go new file mode 100644 index 000000000..714b5aea7 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup_example_md5all_test.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errgroup_test + +import ( + "crypto/md5" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +// Pipeline demonstrates the use of a Group to implement a multi-stage +// pipeline: a version of the MD5All function with bounded parallelism from +// https://blog.golang.org/pipelines. +func ExampleGroup_pipeline() { + m, err := MD5All(context.Background(), ".") + if err != nil { + log.Fatal(err) + } + + for k, sum := range m { + fmt.Printf("%s:\t%x\n", k, sum) + } +} + +type result struct { + path string + sum [md5.Size]byte +} + +// MD5All reads all the files in the file tree rooted at root and returns a map +// from file path to the MD5 sum of the file's contents. If the directory walk +// fails or any read operation fails, MD5All returns an error. +func MD5All(ctx context.Context, root string) (map[string][md5.Size]byte, error) { + // ctx is canceled when g.Wait() returns. When this version of MD5All returns + // - even in case of error! - we know that all of the goroutines have finished + // and the memory they were using can be garbage-collected. + g, ctx := errgroup.WithContext(ctx) + paths := make(chan string) + + g.Go(func() error { + defer close(paths) + return filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.Mode().IsRegular() { + return nil + } + select { + case paths <- path: + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + }) + + // Start a fixed number of goroutines to read and digest files. 
+ c := make(chan result) + const numDigesters = 20 + for i := 0; i < numDigesters; i++ { + g.Go(func() error { + for path := range paths { + data, err := ioutil.ReadFile(path) + if err != nil { + return err + } + select { + case c <- result{path, md5.Sum(data)}: + case <-ctx.Done(): + return ctx.Err() + } + } + return nil + }) + } + go func() { + g.Wait() + close(c) + }() + + m := make(map[string][md5.Size]byte) + for r := range c { + m[r.path] = r.sum + } + // Check whether any of the goroutines failed. Since g is accumulating the + // errors, we don't need to send them (or check for them) in the individual + // results sent on the channel. + if err := g.Wait(); err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup_test.go b/vendor/golang.org/x/sync/errgroup/errgroup_test.go new file mode 100644 index 000000000..6a9696efc --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup_test.go @@ -0,0 +1,176 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errgroup_test + +import ( + "errors" + "fmt" + "net/http" + "os" + "testing" + + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +var ( + Web = fakeSearch("web") + Image = fakeSearch("image") + Video = fakeSearch("video") +) + +type Result string +type Search func(ctx context.Context, query string) (Result, error) + +func fakeSearch(kind string) Search { + return func(_ context.Context, query string) (Result, error) { + return Result(fmt.Sprintf("%s result for %q", kind, query)), nil + } +} + +// JustErrors illustrates the use of a Group in place of a sync.WaitGroup to +// simplify goroutine counting and error handling. This example is derived from +// the sync.WaitGroup example at https://golang.org/pkg/sync/#example_WaitGroup. +func ExampleGroup_justErrors() { + var g errgroup.Group + var urls = []string{ + "http://www.golang.org/", + "http://www.google.com/", + "http://www.somestupidname.com/", + } + for _, url := range urls { + // Launch a goroutine to fetch the URL. + url := url // https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + // Fetch the URL. + resp, err := http.Get(url) + if err == nil { + resp.Body.Close() + } + return err + }) + } + // Wait for all HTTP fetches to complete. + if err := g.Wait(); err == nil { + fmt.Println("Successfully fetched all URLs.") + } +} + +// Parallel illustrates the use of a Group for synchronizing a simple parallel +// task: the "Google Search 2.0" function from +// https://talks.golang.org/2012/concurrency.slide#46, augmented with a Context +// and error-handling. 
+func ExampleGroup_parallel() { + Google := func(ctx context.Context, query string) ([]Result, error) { + g, ctx := errgroup.WithContext(ctx) + + searches := []Search{Web, Image, Video} + results := make([]Result, len(searches)) + for i, search := range searches { + i, search := i, search // https://golang.org/doc/faq#closures_and_goroutines + g.Go(func() error { + result, err := search(ctx, query) + if err == nil { + results[i] = result + } + return err + }) + } + if err := g.Wait(); err != nil { + return nil, err + } + return results, nil + } + + results, err := Google(context.Background(), "golang") + if err != nil { + fmt.Fprintln(os.Stderr, err) + return + } + for _, result := range results { + fmt.Println(result) + } + + // Output: + // web result for "golang" + // image result for "golang" + // video result for "golang" +} + +func TestZeroGroup(t *testing.T) { + err1 := errors.New("errgroup_test: 1") + err2 := errors.New("errgroup_test: 2") + + cases := []struct { + errs []error + }{ + {errs: []error{}}, + {errs: []error{nil}}, + {errs: []error{err1}}, + {errs: []error{err1, nil}}, + {errs: []error{err1, nil, err2}}, + } + + for _, tc := range cases { + var g errgroup.Group + + var firstErr error + for i, err := range tc.errs { + err := err + g.Go(func() error { return err }) + + if firstErr == nil && err != nil { + firstErr = err + } + + if gErr := g.Wait(); gErr != firstErr { + t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+ + "g.Wait() = %v; want %v", + g, tc.errs[:i+1], err, firstErr) + } + } + } +} + +func TestWithContext(t *testing.T) { + errDoom := errors.New("group_test: doomed") + + cases := []struct { + errs []error + want error + }{ + {want: nil}, + {errs: []error{nil}, want: nil}, + {errs: []error{errDoom}, want: errDoom}, + {errs: []error{errDoom, nil}, want: errDoom}, + } + + for _, tc := range cases { + g, ctx := errgroup.WithContext(context.Background()) + + for _, err := range tc.errs { + err := err + g.Go(func() error { return err }) + } + + if err := g.Wait(); err != tc.want { + t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+ + "g.Wait() = %v; want %v", + g, tc.errs, err, tc.want) + } + + canceled := false + select { + case <-ctx.Done(): + canceled = true + default: + } + if !canceled { + t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+ + "ctx.Done() was not closed", + g, tc.errs) + } + } +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 000000000..e9d2d79a9 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,131 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "sync" + + // Use the old context because packages that depend on this one + // (e.g. cloud.google.com/go/...) must run on Go 1.6. + // TODO(jba): update to "context" when possible. + "golang.org/x/net/context" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. 
+func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking only until ctx +// is done. On success, returns nil. On failure, returns ctx.Err() and leaves +// the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + s.waiters.Remove(elem) + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: bad release") + } + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } + s.mu.Unlock() +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go b/vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go new file mode 100644 index 000000000..1e3ab75f5 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore_bench_test.go @@ -0,0 +1,131 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package semaphore_test + +import ( + "fmt" + "testing" + + "golang.org/x/net/context" + "golang.org/x/sync/semaphore" +) + +// weighted is an interface matching a subset of *Weighted. It allows +// alternate implementations for testing and benchmarking. 
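The Weighted semaphore above bounds concurrency by weight: Acquire blocks until enough weight is free or the context is done, TryAcquire never blocks, and Release returns the weight. Example_workerPool later in this diff shows the full worker-pool pattern; a compressed sketch of the same idea, assuming a capacity of three and placeholder work (the sketch imports golang.org/x/net/context to match this vendored copy):

	package main

	import (
		"fmt"

		"golang.org/x/net/context"
		"golang.org/x/sync/semaphore"
	)

	func main() {
		ctx := context.Background()
		sem := semaphore.NewWeighted(3) // at most three units of work in flight

		for i := 0; i < 10; i++ {
			if err := sem.Acquire(ctx, 1); err != nil {
				break // the context was canceled or timed out
			}
			go func(i int) {
				defer sem.Release(1)
				fmt.Println("working on", i) // placeholder for real work
			}(i)
		}

		// Acquiring the full weight waits for the remaining workers to finish.
		if err := sem.Acquire(ctx, 3); err == nil {
			sem.Release(3)
		}
	}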
+type weighted interface { + Acquire(context.Context, int64) error + TryAcquire(int64) bool + Release(int64) +} + +// semChan implements Weighted using a channel for +// comparing against the condition variable-based implementation. +type semChan chan struct{} + +func newSemChan(n int64) semChan { + return semChan(make(chan struct{}, n)) +} + +func (s semChan) Acquire(_ context.Context, n int64) error { + for i := int64(0); i < n; i++ { + s <- struct{}{} + } + return nil +} + +func (s semChan) TryAcquire(n int64) bool { + if int64(len(s))+n > int64(cap(s)) { + return false + } + + for i := int64(0); i < n; i++ { + s <- struct{}{} + } + return true +} + +func (s semChan) Release(n int64) { + for i := int64(0); i < n; i++ { + <-s + } +} + +// acquireN calls Acquire(size) on sem N times and then calls Release(size) N times. +func acquireN(b *testing.B, sem weighted, size int64, N int) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < N; j++ { + sem.Acquire(context.Background(), size) + } + for j := 0; j < N; j++ { + sem.Release(size) + } + } +} + +// tryAcquireN calls TryAcquire(size) on sem N times and then calls Release(size) N times. +func tryAcquireN(b *testing.B, sem weighted, size int64, N int) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < N; j++ { + if !sem.TryAcquire(size) { + b.Fatalf("TryAcquire(%v) = false, want true", size) + } + } + for j := 0; j < N; j++ { + sem.Release(size) + } + } +} + +func BenchmarkNewSeq(b *testing.B) { + for _, cap := range []int64{1, 128} { + b.Run(fmt.Sprintf("Weighted-%d", cap), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = semaphore.NewWeighted(cap) + } + }) + b.Run(fmt.Sprintf("semChan-%d", cap), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = newSemChan(cap) + } + }) + } +} + +func BenchmarkAcquireSeq(b *testing.B) { + for _, c := range []struct { + cap, size int64 + N int + }{ + {1, 1, 1}, + {2, 1, 1}, + {16, 1, 1}, + {128, 1, 1}, + {2, 2, 1}, + {16, 2, 8}, + {128, 2, 64}, + {2, 1, 2}, + {16, 8, 2}, + {128, 64, 2}, + } { + for _, w := range []struct { + name string + w weighted + }{ + {"Weighted", semaphore.NewWeighted(c.cap)}, + {"semChan", newSemChan(c.cap)}, + } { + b.Run(fmt.Sprintf("%s-acquire-%d-%d-%d", w.name, c.cap, c.size, c.N), func(b *testing.B) { + acquireN(b, w.w, c.size, c.N) + }) + b.Run(fmt.Sprintf("%s-tryAcquire-%d-%d-%d", w.name, c.cap, c.size, c.N), func(b *testing.B) { + tryAcquireN(b, w.w, c.size, c.N) + }) + } + } +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore_example_test.go b/vendor/golang.org/x/sync/semaphore/semaphore_example_test.go new file mode 100644 index 000000000..e75cd79f5 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore_example_test.go @@ -0,0 +1,84 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package semaphore_test + +import ( + "context" + "fmt" + "log" + "runtime" + + "golang.org/x/sync/semaphore" +) + +// Example_workerPool demonstrates how to use a semaphore to limit the number of +// goroutines working on parallel tasks. +// +// This use of a semaphore mimics a typical “worker pool” pattern, but without +// the need to explicitly shut down idle workers when the work is done. 
+func Example_workerPool() { + ctx := context.TODO() + + var ( + maxWorkers = runtime.GOMAXPROCS(0) + sem = semaphore.NewWeighted(int64(maxWorkers)) + out = make([]int, 32) + ) + + // Compute the output using up to maxWorkers goroutines at a time. + for i := range out { + // When maxWorkers goroutines are in flight, Acquire blocks until one of the + // workers finishes. + if err := sem.Acquire(ctx, 1); err != nil { + log.Printf("Failed to acquire semaphore: %v", err) + break + } + + go func(i int) { + defer sem.Release(1) + out[i] = collatzSteps(i + 1) + }(i) + } + + // Acquire all of the tokens to wait for any remaining workers to finish. + // + // If you are already waiting for the workers by some other means (such as an + // errgroup.Group), you can omit this final Acquire call. + if err := sem.Acquire(ctx, int64(maxWorkers)); err != nil { + log.Printf("Failed to acquire semaphore: %v", err) + } + + fmt.Println(out) + + // Output: + // [0 1 7 2 5 8 16 3 19 6 14 9 9 17 17 4 12 20 20 7 7 15 15 10 23 10 111 18 18 18 106 5] +} + +// collatzSteps computes the number of steps to reach 1 under the Collatz +// conjecture. (See https://en.wikipedia.org/wiki/Collatz_conjecture.) +func collatzSteps(n int) (steps int) { + if n <= 0 { + panic("nonpositive input") + } + + for ; n > 1; steps++ { + if steps < 0 { + panic("too many steps") + } + + if n%2 == 0 { + n /= 2 + continue + } + + const maxInt = int(^uint(0) >> 1) + if n > (maxInt-1)/3 { + panic("overflow") + } + n = 3*n + 1 + } + + return steps +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore_test.go b/vendor/golang.org/x/sync/semaphore/semaphore_test.go new file mode 100644 index 000000000..2541b9068 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore_test.go @@ -0,0 +1,171 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package semaphore_test + +import ( + "math/rand" + "runtime" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +const maxSleep = 1 * time.Millisecond + +func HammerWeighted(sem *semaphore.Weighted, n int64, loops int) { + for i := 0; i < loops; i++ { + sem.Acquire(context.Background(), n) + time.Sleep(time.Duration(rand.Int63n(int64(maxSleep/time.Nanosecond))) * time.Nanosecond) + sem.Release(n) + } +} + +func TestWeighted(t *testing.T) { + t.Parallel() + + n := runtime.GOMAXPROCS(0) + loops := 10000 / n + sem := semaphore.NewWeighted(int64(n)) + var wg sync.WaitGroup + wg.Add(n) + for i := 0; i < n; i++ { + i := i + go func() { + defer wg.Done() + HammerWeighted(sem, int64(i), loops) + }() + } + wg.Wait() +} + +func TestWeightedPanic(t *testing.T) { + t.Parallel() + + defer func() { + if recover() == nil { + t.Fatal("release of an unacquired weighted semaphore did not panic") + } + }() + w := semaphore.NewWeighted(1) + w.Release(1) +} + +func TestWeightedTryAcquire(t *testing.T) { + t.Parallel() + + ctx := context.Background() + sem := semaphore.NewWeighted(2) + tries := []bool{} + sem.Acquire(ctx, 1) + tries = append(tries, sem.TryAcquire(1)) + tries = append(tries, sem.TryAcquire(1)) + + sem.Release(2) + + tries = append(tries, sem.TryAcquire(1)) + sem.Acquire(ctx, 1) + tries = append(tries, sem.TryAcquire(1)) + + want := []bool{true, false, true, false} + for i := range tries { + if tries[i] != want[i] { + t.Errorf("tries[%d]: got %t, want %t", i, tries[i], want[i]) + } + } +} + +func TestWeightedAcquire(t *testing.T) { + t.Parallel() + + ctx := context.Background() + sem := semaphore.NewWeighted(2) + tryAcquire := func(n int64) bool { + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + return sem.Acquire(ctx, n) == nil + } + + tries := []bool{} + sem.Acquire(ctx, 1) + tries = append(tries, tryAcquire(1)) + tries = append(tries, tryAcquire(1)) + + sem.Release(2) + + tries = append(tries, tryAcquire(1)) + sem.Acquire(ctx, 1) + tries = append(tries, tryAcquire(1)) + + want := []bool{true, false, true, false} + for i := range tries { + if tries[i] != want[i] { + t.Errorf("tries[%d]: got %t, want %t", i, tries[i], want[i]) + } + } +} + +func TestWeightedDoesntBlockIfTooBig(t *testing.T) { + t.Parallel() + + const n = 2 + sem := semaphore.NewWeighted(n) + { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go sem.Acquire(ctx, n+1) + } + + g, ctx := errgroup.WithContext(context.Background()) + for i := n * 3; i > 0; i-- { + g.Go(func() error { + err := sem.Acquire(ctx, 1) + if err == nil { + time.Sleep(1 * time.Millisecond) + sem.Release(1) + } + return err + }) + } + if err := g.Wait(); err != nil { + t.Errorf("semaphore.NewWeighted(%v) failed to AcquireCtx(_, 1) with AcquireCtx(_, %v) pending", n, n+1) + } +} + +// TestLargeAcquireDoesntStarve times out if a large call to Acquire starves. +// Merely returning from the test function indicates success. 
+func TestLargeAcquireDoesntStarve(t *testing.T) { + t.Parallel() + + ctx := context.Background() + n := int64(runtime.GOMAXPROCS(0)) + sem := semaphore.NewWeighted(n) + running := true + + var wg sync.WaitGroup + wg.Add(int(n)) + for i := n; i > 0; i-- { + sem.Acquire(ctx, 1) + go func() { + defer func() { + sem.Release(1) + wg.Done() + }() + for running { + time.Sleep(1 * time.Millisecond) + sem.Release(1) + sem.Acquire(ctx, 1) + } + }() + } + + sem.Acquire(ctx, n) + running = false + sem.Release(n) + wg.Wait() +} diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go new file mode 100644 index 000000000..9a4f8d59e --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -0,0 +1,111 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight // import "golang.org/x/sync/singleflight" + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. 
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + delete(g.m, key) + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/golang.org/x/sync/singleflight/singleflight_test.go b/vendor/golang.org/x/sync/singleflight/singleflight_test.go new file mode 100644 index 000000000..5e6f1b328 --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight_test.go @@ -0,0 +1,87 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package singleflight + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestDo(t *testing.T) { + var g Group + v, err, _ := g.Do("key", func() (interface{}, error) { + return "bar", nil + }) + if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want { + t.Errorf("Do = %v; want %v", got, want) + } + if err != nil { + t.Errorf("Do error = %v", err) + } +} + +func TestDoErr(t *testing.T) { + var g Group + someErr := errors.New("Some error") + v, err, _ := g.Do("key", func() (interface{}, error) { + return nil, someErr + }) + if err != someErr { + t.Errorf("Do error = %v; want someErr %v", err, someErr) + } + if v != nil { + t.Errorf("unexpected non-nil value %#v", v) + } +} + +func TestDoDupSuppress(t *testing.T) { + var g Group + var wg1, wg2 sync.WaitGroup + c := make(chan string, 1) + var calls int32 + fn := func() (interface{}, error) { + if atomic.AddInt32(&calls, 1) == 1 { + // First invocation. + wg1.Done() + } + v := <-c + c <- v // pump; make available for any future calls + + time.Sleep(10 * time.Millisecond) // let more goroutines enter Do + + return v, nil + } + + const n = 10 + wg1.Add(1) + for i := 0; i < n; i++ { + wg1.Add(1) + wg2.Add(1) + go func() { + defer wg2.Done() + wg1.Done() + v, err, _ := g.Do("key", fn) + if err != nil { + t.Errorf("Do error: %v", err) + return + } + if s, _ := v.(string); s != "bar" { + t.Errorf("Do = %T %v; want %q", v, v, "bar") + } + }() + } + wg1.Wait() + // At least one goroutine is in fn now and all of them have at + // least reached the line before the Do. + c <- "bar" + wg2.Wait() + if got := atomic.LoadInt32(&calls); got <= 0 || got >= n { + t.Errorf("number of calls = %d; want over 0 and less than %d", got, n) + } +} diff --git a/vendor/golang.org/x/sync/syncmap/map.go b/vendor/golang.org/x/sync/syncmap/map.go new file mode 100644 index 000000000..80e15847e --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map.go @@ -0,0 +1,372 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package syncmap provides a concurrent map implementation. +// It is a prototype for a proposed addition to the sync package +// in the standard library. +// (https://golang.org/issue/18177) +package syncmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// Map is a concurrent map with amortized-constant-time loads, stores, and deletes. +// It is safe for multiple goroutines to call a Map's methods concurrently. +// +// The zero Map is valid and empty. 
+// +// A Map must not be copied after first use. +type Map struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. + // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-expunged entry requires that the entry be copied to the dirty + // map and unexpunged with mu held. + read atomic.Value // readOnly + + // dirty contains the portion of the map's contents that require mu to be + // held. To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-expunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An expunged entry in the + // clean map must be unexpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[interface{}]*entry + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// readOnly is an immutable struct stored atomically in the Map.read field. +type readOnly struct { + m map[interface{}]*entry + amended bool // true if the dirty map contains some key not in m. +} + +// expunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +var expunged = unsafe.Pointer(new(interface{})) + +// An entry is a slot in the map corresponding to a particular key. +type entry struct { + // p points to the interface{} value stored for the entry. + // + // If p == nil, the entry has been deleted and m.dirty == nil. + // + // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry + // is missing from m.dirty. + // + // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. + // + // An entry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with expunged and leave + // m.dirty[key] unset. + // + // An entry's associated value can be updated by atomic replacement, provided + // p != expunged. If p == expunged, an entry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the entry. + p unsafe.Pointer // *interface{} +} + +func newEntry(i interface{}) *entry { + return &entry{p: unsafe.Pointer(&i)} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *Map) Load(key interface{}) (value interface{}, ok bool) { + read, _ := m.read.Load().(readOnly) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) 
+ read, _ = m.read.Load().(readOnly) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return nil, false + } + return e.load() +} + +func (e *entry) load() (value interface{}, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return nil, false + } + return *(*interface{})(p), true +} + +// Store sets the value for a key. +func (m *Map) Store(key, value interface{}) { + read, _ := m.read.Load().(readOnly) + if e, ok := read.m[key]; ok && e.tryStore(&value) { + return + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The entry was previously expunged, which implies that there is a + // non-nil dirty map and this entry is not in it. + m.dirty[key] = e + } + e.storeLocked(&value) + } else if e, ok := m.dirty[key]; ok { + e.storeLocked(&value) + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnly{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + } + m.mu.Unlock() +} + +// tryStore stores a value if the entry has not been expunged. +// +// If the entry is expunged, tryStore returns false and leaves the entry +// unchanged. +func (e *entry) tryStore(i *interface{}) bool { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return false + } + for { + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return true + } + p = atomic.LoadPointer(&e.p) + if p == expunged { + return false + } + } +} + +// unexpungeLocked ensures that the entry is not marked as expunged. +// +// If the entry was previously expunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *entry) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expunged, nil) +} + +// storeLocked unconditionally stores a value to the entry. +// +// The entry must be known not to be expunged. +func (e *entry) storeLocked(i *interface{}) { + atomic.StorePointer(&e.p, unsafe.Pointer(i)) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { + // Avoid locking if it's a clean hit. + read, _ := m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnly{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the entry is not +// expunged. 
+// +// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and +// returns with ok==false. +func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return nil, false, false + } + if p != nil { + return *(*interface{})(p), true, true + } + + // Copy the interface after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the entry is expunged, we + // shouldn't bother heap-allocating. + ic := i + for { + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == expunged { + return nil, false, false + } + if p != nil { + return *(*interface{})(p), true, true + } + } +} + +// Delete deletes the value for a key. +func (m *Map) Delete(key interface{}) { + read, _ := m.read.Load().(readOnly) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + e, ok = read.m[key] + if !ok && read.amended { + delete(m.dirty, key) + } + m.mu.Unlock() + } + if ok { + e.delete() + } +} + +func (e *entry) delete() (hadValue bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return true + } + } +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *Map) Range(f func(key, value interface{}) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. + // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read, _ := m.read.Load().(readOnly) + if read.amended { + // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! 
+ m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if read.amended { + read = readOnly{m: m.dirty} + m.read.Store(read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *Map) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(readOnly{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *Map) dirtyLocked() { + if m.dirty != nil { + return + } + + read, _ := m.read.Load().(readOnly) + m.dirty = make(map[interface{}]*entry, len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *entry) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, expunged) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == expunged +} diff --git a/vendor/golang.org/x/sync/syncmap/map_bench_test.go b/vendor/golang.org/x/sync/syncmap/map_bench_test.go new file mode 100644 index 000000000..b279b4f74 --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map_bench_test.go @@ -0,0 +1,216 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syncmap_test + +import ( + "fmt" + "reflect" + "sync/atomic" + "testing" + + "golang.org/x/sync/syncmap" +) + +type bench struct { + setup func(*testing.B, mapInterface) + perG func(b *testing.B, pb *testing.PB, i int, m mapInterface) +} + +func benchMap(b *testing.B, bench bench) { + for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &syncmap.Map{}} { + b.Run(fmt.Sprintf("%T", m), func(b *testing.B) { + m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface) + if bench.setup != nil { + bench.setup(b, m) + } + + b.ResetTimer() + + var i int64 + b.RunParallel(func(pb *testing.PB) { + id := int(atomic.AddInt64(&i, 1) - 1) + bench.perG(b, pb, id*b.N, m) + }) + }) + } +} + +func BenchmarkLoadMostlyHits(b *testing.B) { + const hits, misses = 1023, 1 + + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + for i := 0; i < hits; i++ { + m.LoadOrStore(i, i) + } + // Prime the map to get it into a steady state. + for i := 0; i < hits*2; i++ { + m.Load(i % hits) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.Load(i % (hits + misses)) + } + }, + }) +} + +func BenchmarkLoadMostlyMisses(b *testing.B) { + const hits, misses = 1, 1023 + + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + for i := 0; i < hits; i++ { + m.LoadOrStore(i, i) + } + // Prime the map to get it into a steady state. + for i := 0; i < hits*2; i++ { + m.Load(i % hits) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.Load(i % (hits + misses)) + } + }, + }) +} + +func BenchmarkLoadOrStoreBalanced(b *testing.B) { + const hits, misses = 128, 128 + + benchMap(b, bench{ + setup: func(b *testing.B, m mapInterface) { + if _, ok := m.(*DeepCopyMap); ok { + b.Skip("DeepCopyMap has quadratic running time.") + } + for i := 0; i < hits; i++ { + m.LoadOrStore(i, i) + } + // Prime the map to get it into a steady state. 
+ for i := 0; i < hits*2; i++ { + m.Load(i % hits) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + j := i % (hits + misses) + if j < hits { + if _, ok := m.LoadOrStore(j, i); !ok { + b.Fatalf("unexpected miss for %v", j) + } + } else { + if v, loaded := m.LoadOrStore(i, i); loaded { + b.Fatalf("failed to store %v: existing value %v", i, v) + } + } + } + }, + }) +} + +func BenchmarkLoadOrStoreUnique(b *testing.B) { + benchMap(b, bench{ + setup: func(b *testing.B, m mapInterface) { + if _, ok := m.(*DeepCopyMap); ok { + b.Skip("DeepCopyMap has quadratic running time.") + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.LoadOrStore(i, i) + } + }, + }) +} + +func BenchmarkLoadOrStoreCollision(b *testing.B) { + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + m.LoadOrStore(0, 0) + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.LoadOrStore(0, 0) + } + }, + }) +} + +func BenchmarkRange(b *testing.B) { + const mapSize = 1 << 10 + + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + for i := 0; i < mapSize; i++ { + m.Store(i, i) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.Range(func(_, _ interface{}) bool { return true }) + } + }, + }) +} + +// BenchmarkAdversarialAlloc tests performance when we store a new value +// immediately whenever the map is promoted to clean and otherwise load a +// unique, missing key. +// +// This forces the Load calls to always acquire the map's mutex. +func BenchmarkAdversarialAlloc(b *testing.B) { + benchMap(b, bench{ + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + var stores, loadsSinceStore int64 + for ; pb.Next(); i++ { + m.Load(i) + if loadsSinceStore++; loadsSinceStore > stores { + m.LoadOrStore(i, stores) + loadsSinceStore = 0 + stores++ + } + } + }, + }) +} + +// BenchmarkAdversarialDelete tests performance when we periodically delete +// one key and add a different one in a large map. +// +// This forces the Load calls to always acquire the map's mutex and periodically +// makes a full copy of the map despite changing only one entry. +func BenchmarkAdversarialDelete(b *testing.B) { + const mapSize = 1 << 10 + + benchMap(b, bench{ + setup: func(_ *testing.B, m mapInterface) { + for i := 0; i < mapSize; i++ { + m.Store(i, i) + } + }, + + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + m.Load(i) + + if i%mapSize == 0 { + m.Range(func(k, _ interface{}) bool { + m.Delete(k) + return false + }) + m.Store(i, i) + } + } + }, + }) +} diff --git a/vendor/golang.org/x/sync/syncmap/map_reference_test.go b/vendor/golang.org/x/sync/syncmap/map_reference_test.go new file mode 100644 index 000000000..923c51b70 --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map_reference_test.go @@ -0,0 +1,151 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syncmap_test + +import ( + "sync" + "sync/atomic" +) + +// This file contains reference map implementations for unit-tests. + +// mapInterface is the interface Map implements. 
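A side note for orientation (editorial, not part of the vendored `syncmap` package): the `Map` documented above is used like an ordinary map without any explicit locking, and its zero value is ready to use. A minimal sketch relying only on the methods defined in `map.go` (the `mapInterface` type these tests exercise follows just below):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/syncmap"
)

func main() {
	var m syncmap.Map // the zero Map is valid and empty

	m.Store("greeting", "hello")

	if v, ok := m.Load("greeting"); ok {
		fmt.Println(v) // hello
	}

	// LoadOrStore returns the existing value when the key is present,
	// otherwise it stores and returns the given value.
	actual, loaded := m.LoadOrStore("greeting", "hi")
	fmt.Println(actual, loaded) // hello true

	// Range visits each key/value pair; returning false stops the iteration.
	m.Range(func(k, v interface{}) bool {
		fmt.Println(k, "=", v)
		return true
	})

	m.Delete("greeting")
}
```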
+type mapInterface interface { + Load(interface{}) (interface{}, bool) + Store(key, value interface{}) + LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) + Delete(interface{}) + Range(func(key, value interface{}) (shouldContinue bool)) +} + +// RWMutexMap is an implementation of mapInterface using a sync.RWMutex. +type RWMutexMap struct { + mu sync.RWMutex + dirty map[interface{}]interface{} +} + +func (m *RWMutexMap) Load(key interface{}) (value interface{}, ok bool) { + m.mu.RLock() + value, ok = m.dirty[key] + m.mu.RUnlock() + return +} + +func (m *RWMutexMap) Store(key, value interface{}) { + m.mu.Lock() + if m.dirty == nil { + m.dirty = make(map[interface{}]interface{}) + } + m.dirty[key] = value + m.mu.Unlock() +} + +func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { + m.mu.Lock() + actual, loaded = m.dirty[key] + if !loaded { + actual = value + if m.dirty == nil { + m.dirty = make(map[interface{}]interface{}) + } + m.dirty[key] = value + } + m.mu.Unlock() + return actual, loaded +} + +func (m *RWMutexMap) Delete(key interface{}) { + m.mu.Lock() + delete(m.dirty, key) + m.mu.Unlock() +} + +func (m *RWMutexMap) Range(f func(key, value interface{}) (shouldContinue bool)) { + m.mu.RLock() + keys := make([]interface{}, 0, len(m.dirty)) + for k := range m.dirty { + keys = append(keys, k) + } + m.mu.RUnlock() + + for _, k := range keys { + v, ok := m.Load(k) + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +// DeepCopyMap is an implementation of mapInterface using a Mutex and +// atomic.Value. It makes deep copies of the map on every write to avoid +// acquiring the Mutex in Load. +type DeepCopyMap struct { + mu sync.Mutex + clean atomic.Value +} + +func (m *DeepCopyMap) Load(key interface{}) (value interface{}, ok bool) { + clean, _ := m.clean.Load().(map[interface{}]interface{}) + value, ok = clean[key] + return value, ok +} + +func (m *DeepCopyMap) Store(key, value interface{}) { + m.mu.Lock() + dirty := m.dirty() + dirty[key] = value + m.clean.Store(dirty) + m.mu.Unlock() +} + +func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { + clean, _ := m.clean.Load().(map[interface{}]interface{}) + actual, loaded = clean[key] + if loaded { + return actual, loaded + } + + m.mu.Lock() + // Reload clean in case it changed while we were waiting on m.mu. + clean, _ = m.clean.Load().(map[interface{}]interface{}) + actual, loaded = clean[key] + if !loaded { + dirty := m.dirty() + dirty[key] = value + actual = value + m.clean.Store(dirty) + } + m.mu.Unlock() + return actual, loaded +} + +func (m *DeepCopyMap) Delete(key interface{}) { + m.mu.Lock() + dirty := m.dirty() + delete(dirty, key) + m.clean.Store(dirty) + m.mu.Unlock() +} + +func (m *DeepCopyMap) Range(f func(key, value interface{}) (shouldContinue bool)) { + clean, _ := m.clean.Load().(map[interface{}]interface{}) + for k, v := range clean { + if !f(k, v) { + break + } + } +} + +func (m *DeepCopyMap) dirty() map[interface{}]interface{} { + clean, _ := m.clean.Load().(map[interface{}]interface{}) + dirty := make(map[interface{}]interface{}, len(clean)+1) + for k, v := range clean { + dirty[k] = v + } + return dirty +} diff --git a/vendor/golang.org/x/sync/syncmap/map_test.go b/vendor/golang.org/x/sync/syncmap/map_test.go new file mode 100644 index 000000000..c883f176f --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map_test.go @@ -0,0 +1,172 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syncmap_test + +import ( + "math/rand" + "reflect" + "runtime" + "sync" + "testing" + "testing/quick" + + "golang.org/x/sync/syncmap" +) + +type mapOp string + +const ( + opLoad = mapOp("Load") + opStore = mapOp("Store") + opLoadOrStore = mapOp("LoadOrStore") + opDelete = mapOp("Delete") +) + +var mapOps = [...]mapOp{opLoad, opStore, opLoadOrStore, opDelete} + +// mapCall is a quick.Generator for calls on mapInterface. +type mapCall struct { + op mapOp + k, v interface{} +} + +func (c mapCall) apply(m mapInterface) (interface{}, bool) { + switch c.op { + case opLoad: + return m.Load(c.k) + case opStore: + m.Store(c.k, c.v) + return nil, false + case opLoadOrStore: + return m.LoadOrStore(c.k, c.v) + case opDelete: + m.Delete(c.k) + return nil, false + default: + panic("invalid mapOp") + } +} + +type mapResult struct { + value interface{} + ok bool +} + +func randValue(r *rand.Rand) interface{} { + b := make([]byte, r.Intn(4)) + for i := range b { + b[i] = 'a' + byte(rand.Intn(26)) + } + return string(b) +} + +func (mapCall) Generate(r *rand.Rand, size int) reflect.Value { + c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)} + switch c.op { + case opStore, opLoadOrStore: + c.v = randValue(r) + } + return reflect.ValueOf(c) +} + +func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[interface{}]interface{}) { + for _, c := range calls { + v, ok := c.apply(m) + results = append(results, mapResult{v, ok}) + } + + final = make(map[interface{}]interface{}) + m.Range(func(k, v interface{}) bool { + final[k] = v + return true + }) + + return results, final +} + +func applyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { + return applyCalls(new(syncmap.Map), calls) +} + +func applyRWMutexMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { + return applyCalls(new(RWMutexMap), calls) +} + +func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) { + return applyCalls(new(DeepCopyMap), calls) +} + +func TestMapMatchesRWMutex(t *testing.T) { + if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil { + t.Error(err) + } +} + +func TestMapMatchesDeepCopy(t *testing.T) { + if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil { + t.Error(err) + } +} + +func TestConcurrentRange(t *testing.T) { + const mapSize = 1 << 10 + + m := new(syncmap.Map) + for n := int64(1); n <= mapSize; n++ { + m.Store(n, int64(n)) + } + + done := make(chan struct{}) + var wg sync.WaitGroup + defer func() { + close(done) + wg.Wait() + }() + for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- { + r := rand.New(rand.NewSource(g)) + wg.Add(1) + go func(g int64) { + defer wg.Done() + for i := int64(0); ; i++ { + select { + case <-done: + return + default: + } + for n := int64(1); n < mapSize; n++ { + if r.Int63n(mapSize) == 0 { + m.Store(n, n*i*g) + } else { + m.Load(n) + } + } + } + }(g) + } + + iters := 1 << 10 + if testing.Short() { + iters = 16 + } + for n := iters; n > 0; n-- { + seen := make(map[int64]bool, mapSize) + + m.Range(func(ki, vi interface{}) bool { + k, v := ki.(int64), vi.(int64) + if v%k != 0 { + t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v) + } + if seen[k] { + t.Fatalf("Range visited key %v twice", k) + } + seen[k] = true + return true + }) + + if len(seen) != mapSize { + t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize) + } + 
} +} diff --git a/vendor/gopkg.in/go-playground/assert.v1/LICENSE b/vendor/gopkg.in/go-playground/assert.v1/LICENSE new file mode 100644 index 000000000..6a2ae9aa4 --- /dev/null +++ b/vendor/gopkg.in/go-playground/assert.v1/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Dean Karn + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/gopkg.in/go-playground/assert.v1/README.md b/vendor/gopkg.in/go-playground/assert.v1/README.md new file mode 100644 index 000000000..6e40ab37b --- /dev/null +++ b/vendor/gopkg.in/go-playground/assert.v1/README.md @@ -0,0 +1,90 @@ +Package assert +============== + +[![Build Status](https://semaphoreci.com/api/v1/projects/d5e4f510-5e1e-48d9-b38b-f447f270a057/488336/badge.svg)](https://semaphoreci.com/joeybloggs/assert) +[![GoDoc](https://godoc.org/gopkg.in/go-playground/assert.v1?status.svg)](https://godoc.org/gopkg.in/go-playground/assert.v1) + +Package assert is a Basic Assertion library used along side native go testing + +Installation +------------ + +Use go get. + + go get gopkg.in/go-playground/assert.v1 + +or to update + + go get -u gopkg.in/go-playground/assert.v1 + +Then import the validator package into your own code. + + import . "gopkg.in/go-playground/assert.v1" + +Usage and documentation +------ + +Please see http://godoc.org/gopkg.in/go-playground/assert.v1 for detailed usage docs. + +##### Example: +```go +package whatever + +import ( + "errors" + "testing" + . 
"gopkg.in/go-playground/assert.v1" +) + +func AssertCustomErrorHandler(t *testing.T, errs map[string]string, key, expected string) { + val, ok := errs[key] + + // using EqualSkip and NotEqualSkip as building blocks for my custom Assert function + EqualSkip(t, 2, ok, true) + NotEqualSkip(t, 2, val, nil) + EqualSkip(t, 2, val, expected) +} + +func TestEqual(t *testing.T) { + + // error comes from your package/library + err := errors.New("my error") + NotEqual(t, err, nil) + Equal(t, err.Error(), "my error") + + err = nil + Equal(t, err, nil) + + fn := func() { + panic("omg omg omg!") + } + + PanicMatches(t, func() { fn() }, "omg omg omg!") + PanicMatches(t, func() { panic("omg omg omg!") }, "omg omg omg!") + + // errs would have come from your package/library + errs := map[string]string{} + errs["Name"] = "User Name Invalid" + errs["Email"] = "User Email Invalid" + + AssertCustomErrorHandler(t, errs, "Name", "User Name Invalid") + AssertCustomErrorHandler(t, errs, "Email", "User Email Invalid") +} +``` + +How to Contribute +------ + +There will always be a development branch for each version i.e. `v1-development`. In order to contribute, +please make your pull requests against those branches. + +If the changes being proposed or requested are breaking changes, please create an issue, for discussion +or create a pull request against the highest development branch for example this package has a +v1 and v1-development branch however, there will also be a v2-development brach even though v2 doesn't exist yet. + +I strongly encourage everyone whom creates a usefull custom assertion function to contribute them and +help make this package even better. + +License +------ +Distributed under MIT License, please see license file in code for more details. diff --git a/vendor/gopkg.in/go-playground/assert.v1/assert.go b/vendor/gopkg.in/go-playground/assert.v1/assert.go new file mode 100644 index 000000000..74a89c53d --- /dev/null +++ b/vendor/gopkg.in/go-playground/assert.v1/assert.go @@ -0,0 +1,197 @@ +package assert + +import ( + "fmt" + "path" + "reflect" + "regexp" + "runtime" + "testing" +) + +// IsEqual returns whether val1 is equal to val2 taking into account Pointers, Interfaces and their underlying types +func IsEqual(val1, val2 interface{}) bool { + v1 := reflect.ValueOf(val1) + v2 := reflect.ValueOf(val2) + + if v1.Kind() == reflect.Ptr { + v1 = v1.Elem() + } + + if v2.Kind() == reflect.Ptr { + v2 = v2.Elem() + } + + if !v1.IsValid() && !v2.IsValid() { + return true + } + + switch v1.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v1.IsNil() { + v1 = reflect.ValueOf(nil) + } + } + + switch v2.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v2.IsNil() { + v2 = reflect.ValueOf(nil) + } + } + + v1Underlying := reflect.Zero(reflect.TypeOf(v1)).Interface() + v2Underlying := reflect.Zero(reflect.TypeOf(v2)).Interface() + + if v1 == v1Underlying { + if v2 == v2Underlying { + goto CASE4 + } else { + goto CASE3 + } + } else { + if v2 == v2Underlying { + goto CASE2 + } else { + goto CASE1 + } + } + +CASE1: + return reflect.DeepEqual(v1.Interface(), v2.Interface()) +CASE2: + return reflect.DeepEqual(v1.Interface(), v2) +CASE3: + return reflect.DeepEqual(v1, v2.Interface()) +CASE4: + return reflect.DeepEqual(v1, v2) +} + +// NotMatchRegex validates that value matches the regex, either string or *regex +// and throws an error with line number +func NotMatchRegex(t *testing.T, value 
string, regex interface{}) { + NotMatchRegexSkip(t, 2, value, regex) +} + +// NotMatchRegexSkip validates that value matches the regex, either string or *regex +// and throws an error with line number +// but the skip variable tells NotMatchRegexSkip how far back on the stack to report the error. +// This is a building block to creating your own more complex validation functions. +func NotMatchRegexSkip(t *testing.T, skip int, value string, regex interface{}) { + + if r, ok, err := regexMatches(regex, value); ok || err != nil { + _, file, line, _ := runtime.Caller(skip) + + if err != nil { + fmt.Printf("%s:%d %v error compiling regex %v\n", path.Base(file), line, value, r.String()) + } else { + fmt.Printf("%s:%d %v matches regex %v\n", path.Base(file), line, value, r.String()) + } + + t.FailNow() + } +} + +// MatchRegex validates that value matches the regex, either string or *regex +// and throws an error with line number +func MatchRegex(t *testing.T, value string, regex interface{}) { + MatchRegexSkip(t, 2, value, regex) +} + +// MatchRegexSkip validates that value matches the regex, either string or *regex +// and throws an error with line number +// but the skip variable tells MatchRegexSkip how far back on the stack to report the error. +// This is a building block to creating your own more complex validation functions. +func MatchRegexSkip(t *testing.T, skip int, value string, regex interface{}) { + + if r, ok, err := regexMatches(regex, value); !ok { + _, file, line, _ := runtime.Caller(skip) + + if err != nil { + fmt.Printf("%s:%d %v error compiling regex %v\n", path.Base(file), line, value, r.String()) + } else { + fmt.Printf("%s:%d %v does not match regex %v\n", path.Base(file), line, value, r.String()) + } + + t.FailNow() + } +} + +func regexMatches(regex interface{}, value string) (*regexp.Regexp, bool, error) { + + var err error + + r, ok := regex.(*regexp.Regexp) + + // must be a string + if !ok { + if r, err = regexp.Compile(regex.(string)); err != nil { + return r, false, err + } + } + + return r, r.MatchString(value), err +} + +// Equal validates that val1 is equal to val2 and throws an error with line number +func Equal(t *testing.T, val1, val2 interface{}) { + EqualSkip(t, 2, val1, val2) +} + +// EqualSkip validates that val1 is equal to val2 and throws an error with line number +// but the skip variable tells EqualSkip how far back on the stack to report the error. +// This is a building block to creating your own more complex validation functions. +func EqualSkip(t *testing.T, skip int, val1, val2 interface{}) { + + if !IsEqual(val1, val2) { + _, file, line, _ := runtime.Caller(skip) + fmt.Printf("%s:%d %v does not equal %v\n", path.Base(file), line, val1, val2) + t.FailNow() + } +} + +// NotEqual validates that val1 is not equal val2 and throws an error with line number +func NotEqual(t *testing.T, val1, val2 interface{}) { + NotEqualSkip(t, 2, val1, val2) +} + +// NotEqualSkip validates that val1 is not equal to val2 and throws an error with line number +// but the skip variable tells NotEqualSkip how far back on the stack to report the error. +// This is a building block to creating your own more complex validation functions. 
+func NotEqualSkip(t *testing.T, skip int, val1, val2 interface{}) { + + if IsEqual(val1, val2) { + _, file, line, _ := runtime.Caller(skip) + fmt.Printf("%s:%d %v should not be equal %v\n", path.Base(file), line, val1, val2) + t.FailNow() + } +} + +// PanicMatches validates that the panic output of running fn matches the supplied string +func PanicMatches(t *testing.T, fn func(), matches string) { + PanicMatchesSkip(t, 2, fn, matches) +} + +// PanicMatchesSkip validates that the panic output of running fn matches the supplied string +// but the skip variable tells PanicMatchesSkip how far back on the stack to report the error. +// This is a building block to creating your own more complex validation functions. +func PanicMatchesSkip(t *testing.T, skip int, fn func(), matches string) { + + _, file, line, _ := runtime.Caller(skip) + + defer func() { + if r := recover(); r != nil { + err := fmt.Sprintf("%s", r) + + if err != matches { + fmt.Printf("%s:%d Panic... expected [%s] received [%s]", path.Base(file), line, matches, err) + t.FailNow() + } + } else { + fmt.Printf("%s:%d Panic Expected, none found... expected [%s]", path.Base(file), line, matches) + t.FailNow() + } + }() + + fn() +} diff --git a/vendor/gopkg.in/go-playground/assert.v1/assert_test.go b/vendor/gopkg.in/go-playground/assert.v1/assert_test.go new file mode 100644 index 000000000..743700e9e --- /dev/null +++ b/vendor/gopkg.in/go-playground/assert.v1/assert_test.go @@ -0,0 +1,92 @@ +package assert + +import ( + "errors" + "testing" +) + +// NOTES: +// - Run "go test" to run tests +// - Run "gocov test | gocov report" to report on test converage by file +// - Run "gocov test | gocov annotate -" to report on all code and functions, those ,marked with "MISS" were never called +// +// or +// +// -- may be a good idea to change to output path to somewherelike /tmp +// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html +// + +func MyCustomErrorHandler(t *testing.T, errs map[string]string, key, expected string) { + val, ok := errs[key] + EqualSkip(t, 2, ok, true) + NotEqualSkip(t, 2, val, nil) + EqualSkip(t, 2, val, expected) +} + +func TestRegexMatchAndNotMatch(t *testing.T) { + goodRegex := "^(.*/vendor/)?github.com/go-playground/assert$" + + MatchRegex(t, "github.com/go-playground/assert", goodRegex) + MatchRegex(t, "/vendor/github.com/go-playground/assert", goodRegex) + + NotMatchRegex(t, "/vendor/github.com/go-playground/test", goodRegex) +} + +func TestBasicAllGood(t *testing.T) { + + err := errors.New("my error") + NotEqual(t, err, nil) + Equal(t, err.Error(), "my error") + + err = nil + Equal(t, err, nil) + + fn := func() { + panic("omg omg omg!") + } + + PanicMatches(t, func() { fn() }, "omg omg omg!") + PanicMatches(t, func() { panic("omg omg omg!") }, "omg omg omg!") + + /* if you uncomment creates hard fail, that is expected + // you cant really do this, but it is here for the sake of completeness + fun := func() {} + PanicMatches(t, func() { fun() }, "omg omg omg!") + */ + errs := map[string]string{} + errs["Name"] = "User Name Invalid" + errs["Email"] = "User Email Invalid" + + MyCustomErrorHandler(t, errs, "Name", "User Name Invalid") + MyCustomErrorHandler(t, errs, "Email", "User Email Invalid") +} + +func TestEquals(t *testing.T) { + + type Test struct { + Name string + } + + tst := &Test{ + Name: "joeybloggs", + } + + Equal(t, tst, tst) + + NotEqual(t, tst, nil) + NotEqual(t, nil, tst) + + type TestMap map[string]string + + var tm TestMap + + Equal(t, tm, nil) + Equal(t, nil, tm) + + 
var iface interface{} + var iface2 interface{} + + iface = 1 + Equal(t, iface, 1) + NotEqual(t, iface, iface2) +} diff --git a/vendor/gopkg.in/go-playground/assert.v1/doc.go b/vendor/gopkg.in/go-playground/assert.v1/doc.go new file mode 100644 index 000000000..896f94aa6 --- /dev/null +++ b/vendor/gopkg.in/go-playground/assert.v1/doc.go @@ -0,0 +1,49 @@ +/* +Package assert provides some basic assertion functions for testing and +also provides the building blocks for creating your own more complex +validations. + + package whatever + + import ( + "errors" + "testing" + . "gopkg.in/go-playground/assert.v1" + ) + + func AssertCustomErrorHandler(t *testing.T, errs map[string]string, key, expected string) { + val, ok := errs[key] + + // using EqualSkip and NotEqualSkip as building blocks for my custom Assert function + EqualSkip(t, 2, ok, true) + NotEqualSkip(t, 2, val, nil) + EqualSkip(t, 2, val, expected) + } + + func TestEqual(t *testing.T) { + + // error comes from your package/library + err := errors.New("my error") + NotEqual(t, err, nil) + Equal(t, err.Error(), "my error") + + err = nil + Equal(t, err, nil) + + fn := func() { + panic("omg omg omg!") + } + + PanicMatches(t, func() { fn() }, "omg omg omg!") + PanicMatches(t, func() { panic("omg omg omg!") }, "omg omg omg!") + + // errs would have come from your package/library + errs := map[string]string{} + errs["Name"] = "User Name Invalid" + errs["Email"] = "User Email Invalid" + + AssertCustomErrorHandler(t, errs, "Name", "User Name Invalid") + AssertCustomErrorHandler(t, errs, "Email", "User Email Invalid") + } +*/ +package assert diff --git a/vendor/gopkg.in/go-playground/validator.v8/LICENSE b/vendor/gopkg.in/go-playground/validator.v8/LICENSE new file mode 100644 index 000000000..6a2ae9aa4 --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Dean Karn + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/vendor/gopkg.in/go-playground/validator.v8/README.md b/vendor/gopkg.in/go-playground/validator.v8/README.md new file mode 100644 index 000000000..d3bd9b0f1 --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/README.md @@ -0,0 +1,366 @@ +Package validator +================ +[![Join the chat at https://gitter.im/bluesuncorp/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +![Project status](https://img.shields.io/badge/version-8.18.2-green.svg) +[![Build Status](https://semaphoreci.com/api/v1/projects/ec20115f-ef1b-4c7d-9393-cc76aba74eb4/530054/badge.svg)](https://semaphoreci.com/joeybloggs/validator) +[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=v8&service=github)](https://coveralls.io/github/go-playground/validator?branch=v8) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) +[![GoDoc](https://godoc.org/gopkg.in/go-playground/validator.v8?status.svg)](https://godoc.org/gopkg.in/go-playground/validator.v8) +![License](https://img.shields.io/dub/l/vibe-d.svg) + +Package validator implements value validations for structs and individual fields based on tags. + +It has the following **unique** features: + +- Cross Field and Cross Struct validations by using validation tags or custom validators. +- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated. +- Handles type interface by determining it's underlying type prior to validation. +- Handles custom field types such as sql driver Valuer see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29) +- Alias validation tags, which allows for mapping of several validations to a single tag for easier defining of validations on structs +- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError + +Installation +------------ + +Use go get. + + go get gopkg.in/go-playground/validator.v8 + +or to update + + go get -u gopkg.in/go-playground/validator.v8 + +Then import the validator package into your own code. + + import "gopkg.in/go-playground/validator.v8" + +Error Return Value +------- + +Validation functions return type error + +They return type error to avoid the issue discussed in the following, where err is always != nil: + +* http://stackoverflow.com/a/29138676/3158232 +* https://github.com/go-playground/validator/issues/134 + +validator only returns nil or ValidationErrors as type error; so in you code all you need to do +is check if the error returned is not nil, and if it's not type cast it to type ValidationErrors +like so: + +```go +err := validate.Struct(mystruct) +validationErrors := err.(validator.ValidationErrors) + ``` + +Usage and documentation +------ + +Please see http://godoc.org/gopkg.in/go-playground/validator.v8 for detailed usage docs. 
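(Editorial sketch, not part of the vendored README: it expands the snippet in the Error Return Value section above into the usual nil-check-then-assert pattern, with `user` standing in for any struct carrying `validate` tags.)

```go
config := &validator.Config{TagName: "validate"}
validate := validator.New(config)

if err := validate.Struct(user); err != nil {
	// validate.Struct returns nil or validator.ValidationErrors,
	// a map[string]*FieldError keyed by the field's namespace.
	for namespace, fieldErr := range err.(validator.ValidationErrors) {
		fmt.Printf("%s failed on the %q tag\n", namespace, fieldErr.Tag)
	}
}
```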
+ +##### Examples: + +Struct & Field validation +```go +package main + +import ( + "fmt" + + "gopkg.in/go-playground/validator.v8" +) + +// User contains user information +type User struct { + FirstName string `validate:"required"` + LastName string `validate:"required"` + Age uint8 `validate:"gte=0,lte=130"` + Email string `validate:"required,email"` + FavouriteColor string `validate:"hexcolor|rgb|rgba"` + Addresses []*Address `validate:"required,dive,required"` // a person can have a home and cottage... +} + +// Address houses a users address information +type Address struct { + Street string `validate:"required"` + City string `validate:"required"` + Planet string `validate:"required"` + Phone string `validate:"required"` +} + +var validate *validator.Validate + +func main() { + + config := &validator.Config{TagName: "validate"} + + validate = validator.New(config) + + validateStruct() + validateField() +} + +func validateStruct() { + + address := &Address{ + Street: "Eavesdown Docks", + Planet: "Persphone", + Phone: "none", + } + + user := &User{ + FirstName: "Badger", + LastName: "Smith", + Age: 135, + Email: "Badger.Smith@gmail.com", + FavouriteColor: "#000", + Addresses: []*Address{address}, + } + + // returns nil or ValidationErrors ( map[string]*FieldError ) + errs := validate.Struct(user) + + if errs != nil { + + fmt.Println(errs) // output: Key: "User.Age" Error:Field validation for "Age" failed on the "lte" tag + // Key: "User.Addresses[0].City" Error:Field validation for "City" failed on the "required" tag + err := errs.(validator.ValidationErrors)["User.Addresses[0].City"] + fmt.Println(err.Field) // output: City + fmt.Println(err.Tag) // output: required + fmt.Println(err.Kind) // output: string + fmt.Println(err.Type) // output: string + fmt.Println(err.Param) // output: + fmt.Println(err.Value) // output: + + // from here you can create your own error messages in whatever language you wish + return + } + + // save user to database +} + +func validateField() { + myEmail := "joeybloggs.gmail.com" + + errs := validate.Field(myEmail, "required,email") + + if errs != nil { + fmt.Println(errs) // output: Key: "" Error:Field validation for "" failed on the "email" tag + return + } + + // email ok, move on +} +``` + +Custom Field Type +```go +package main + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + + "gopkg.in/go-playground/validator.v8" +) + +// DbBackedUser User struct +type DbBackedUser struct { + Name sql.NullString `validate:"required"` + Age sql.NullInt64 `validate:"required"` +} + +func main() { + + config := &validator.Config{TagName: "validate"} + + validate := validator.New(config) + + // register all sql.Null* types to use the ValidateValuer CustomTypeFunc + validate.RegisterCustomTypeFunc(ValidateValuer, sql.NullString{}, sql.NullInt64{}, sql.NullBool{}, sql.NullFloat64{}) + + x := DbBackedUser{Name: sql.NullString{String: "", Valid: true}, Age: sql.NullInt64{Int64: 0, Valid: false}} + errs := validate.Struct(x) + + if len(errs.(validator.ValidationErrors)) > 0 { + fmt.Printf("Errs:\n%+v\n", errs) + } +} + +// ValidateValuer implements validator.CustomTypeFunc +func ValidateValuer(field reflect.Value) interface{} { + if valuer, ok := field.Interface().(driver.Valuer); ok { + val, err := valuer.Value() + if err == nil { + return val + } + // handle the error how you want + } + return nil +} +``` + +Struct Level Validation +```go +package main + +import ( + "fmt" + "reflect" + + "gopkg.in/go-playground/validator.v8" +) + +// User contains 
user information +type User struct { + FirstName string `json:"fname"` + LastName string `json:"lname"` + Age uint8 `validate:"gte=0,lte=130"` + Email string `validate:"required,email"` + FavouriteColor string `validate:"hexcolor|rgb|rgba"` + Addresses []*Address `validate:"required,dive,required"` // a person can have a home and cottage... +} + +// Address houses a users address information +type Address struct { + Street string `validate:"required"` + City string `validate:"required"` + Planet string `validate:"required"` + Phone string `validate:"required"` +} + +var validate *validator.Validate + +func main() { + + config := &validator.Config{TagName: "validate"} + + validate = validator.New(config) + validate.RegisterStructValidation(UserStructLevelValidation, User{}) + + validateStruct() +} + +// UserStructLevelValidation contains custom struct level validations that don't always +// make sense at the field validation level. For Example this function validates that either +// FirstName or LastName exist; could have done that with a custom field validation but then +// would have had to add it to both fields duplicating the logic + overhead, this way it's +// only validated once. +// +// NOTE: you may ask why wouldn't I just do this outside of validator, because doing this way +// hooks right into validator and you can combine with validation tags and still have a +// common error output format. +func UserStructLevelValidation(v *validator.Validate, structLevel *validator.StructLevel) { + + user := structLevel.CurrentStruct.Interface().(User) + + if len(user.FirstName) == 0 && len(user.LastName) == 0 { + structLevel.ReportError(reflect.ValueOf(user.FirstName), "FirstName", "fname", "fnameorlname") + structLevel.ReportError(reflect.ValueOf(user.LastName), "LastName", "lname", "fnameorlname") + } + + // plus can to more, even with different tag than "fnameorlname" +} + +func validateStruct() { + + address := &Address{ + Street: "Eavesdown Docks", + Planet: "Persphone", + Phone: "none", + City: "Unknown", + } + + user := &User{ + FirstName: "", + LastName: "", + Age: 45, + Email: "Badger.Smith@gmail.com", + FavouriteColor: "#000", + Addresses: []*Address{address}, + } + + // returns nil or ValidationErrors ( map[string]*FieldError ) + errs := validate.Struct(user) + + if errs != nil { + + fmt.Println(errs) // output: Key: 'User.LastName' Error:Field validation for 'LastName' failed on the 'fnameorlname' tag + // Key: 'User.FirstName' Error:Field validation for 'FirstName' failed on the 'fnameorlname' tag + err := errs.(validator.ValidationErrors)["User.FirstName"] + fmt.Println(err.Field) // output: FirstName + fmt.Println(err.Tag) // output: fnameorlname + fmt.Println(err.Kind) // output: string + fmt.Println(err.Type) // output: string + fmt.Println(err.Param) // output: + fmt.Println(err.Value) // output: + + // from here you can create your own error messages in whatever language you wish + return + } + + // save user to database +} +``` + +Benchmarks +------ +###### Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3 using Go version go1.5.3 darwin/amd64 +```go +PASS +BenchmarkFieldSuccess-8 20000000 118 ns/op 0 B/op 0 allocs/op +BenchmarkFieldFailure-8 2000000 758 ns/op 432 B/op 4 allocs/op +BenchmarkFieldDiveSuccess-8 500000 2471 ns/op 464 B/op 28 allocs/op +BenchmarkFieldDiveFailure-8 500000 3172 ns/op 896 B/op 32 allocs/op +BenchmarkFieldCustomTypeSuccess-8 5000000 300 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeFailure-8 2000000 775 
ns/op 432 B/op 4 allocs/op +BenchmarkFieldOrTagSuccess-8 1000000 1122 ns/op 4 B/op 1 allocs/op +BenchmarkFieldOrTagFailure-8 1000000 1167 ns/op 448 B/op 6 allocs/op +BenchmarkStructLevelValidationSuccess-8 3000000 548 ns/op 160 B/op 5 allocs/op +BenchmarkStructLevelValidationFailure-8 3000000 558 ns/op 160 B/op 5 allocs/op +BenchmarkStructSimpleCustomTypeSuccess-8 2000000 623 ns/op 36 B/op 3 allocs/op +BenchmarkStructSimpleCustomTypeFailure-8 1000000 1381 ns/op 640 B/op 9 allocs/op +BenchmarkStructPartialSuccess-8 1000000 1036 ns/op 272 B/op 9 allocs/op +BenchmarkStructPartialFailure-8 1000000 1734 ns/op 730 B/op 14 allocs/op +BenchmarkStructExceptSuccess-8 2000000 888 ns/op 250 B/op 7 allocs/op +BenchmarkStructExceptFailure-8 1000000 1036 ns/op 272 B/op 9 allocs/op +BenchmarkStructSimpleCrossFieldSuccess-8 2000000 773 ns/op 80 B/op 4 allocs/op +BenchmarkStructSimpleCrossFieldFailure-8 1000000 1487 ns/op 536 B/op 9 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 1000000 1261 ns/op 112 B/op 7 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailure-8 1000000 2055 ns/op 576 B/op 12 allocs/op +BenchmarkStructSimpleSuccess-8 3000000 519 ns/op 4 B/op 1 allocs/op +BenchmarkStructSimpleFailure-8 1000000 1429 ns/op 640 B/op 9 allocs/op +BenchmarkStructSimpleSuccessParallel-8 10000000 146 ns/op 4 B/op 1 allocs/op +BenchmarkStructSimpleFailureParallel-8 2000000 551 ns/op 640 B/op 9 allocs/op +BenchmarkStructComplexSuccess-8 500000 3269 ns/op 244 B/op 15 allocs/op +BenchmarkStructComplexFailure-8 200000 8436 ns/op 3609 B/op 60 allocs/op +BenchmarkStructComplexSuccessParallel-8 1000000 1024 ns/op 244 B/op 15 allocs/op +BenchmarkStructComplexFailureParallel-8 500000 3536 ns/op 3609 B/op 60 allocs/op +``` + +Complimentary Software +---------------------- + +Here is a list of software that compliments using this library either pre or post validation. + +* [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values. Dual Array and Full map support. +* [Conform](https://github.com/leebenson/conform) - Trims, sanitizes & scrubs data based on struct tags. + +How to Contribute +------ + +There will always be a development branch for each version i.e. `v1-development`. In order to contribute, +please make your pull requests against those branches. + +If the changes being proposed or requested are breaking changes, please create an issue, for discussion +or create a pull request against the highest development branch for example this package has a +v1 and v1-development branch however, there will also be a v2-development branch even though v2 doesn't exist yet. + +I strongly encourage everyone whom creates a custom validation function to contribute them and +help make this package even better. + +License +------ +Distributed under MIT License, please see license file in code for more details. diff --git a/vendor/gopkg.in/go-playground/validator.v8/baked_in.go b/vendor/gopkg.in/go-playground/validator.v8/baked_in.go new file mode 100644 index 000000000..44aaa0851 --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/baked_in.go @@ -0,0 +1,1410 @@ +package validator + +import ( + "fmt" + "net" + "net/url" + "reflect" + "strings" + "time" + "unicode/utf8" +) + +// BakedInAliasValidators is a default mapping of a single validationstag that +// defines a common or complex set of validation(s) to simplify +// adding validation to structs. i.e. 
set key "_ageok" and the tags +// are "gt=0,lte=130" or key "_preferredname" and tags "omitempty,gt=0,lte=60" +var bakedInAliasValidators = map[string]string{ + "iscolor": "hexcolor|rgb|rgba|hsl|hsla", +} + +// BakedInValidators is the default map of ValidationFunc +// you can add, remove or even replace items to suite your needs, +// or even disregard and use your own map if so desired. +var bakedInValidators = map[string]Func{ + "required": HasValue, + "len": HasLengthOf, + "min": HasMinOf, + "max": HasMaxOf, + "eq": IsEq, + "ne": IsNe, + "lt": IsLt, + "lte": IsLte, + "gt": IsGt, + "gte": IsGte, + "eqfield": IsEqField, + "eqcsfield": IsEqCrossStructField, + "necsfield": IsNeCrossStructField, + "gtcsfield": IsGtCrossStructField, + "gtecsfield": IsGteCrossStructField, + "ltcsfield": IsLtCrossStructField, + "ltecsfield": IsLteCrossStructField, + "nefield": IsNeField, + "gtefield": IsGteField, + "gtfield": IsGtField, + "ltefield": IsLteField, + "ltfield": IsLtField, + "alpha": IsAlpha, + "alphanum": IsAlphanum, + "numeric": IsNumeric, + "number": IsNumber, + "hexadecimal": IsHexadecimal, + "hexcolor": IsHEXColor, + "rgb": IsRGB, + "rgba": IsRGBA, + "hsl": IsHSL, + "hsla": IsHSLA, + "email": IsEmail, + "url": IsURL, + "uri": IsURI, + "base64": IsBase64, + "contains": Contains, + "containsany": ContainsAny, + "containsrune": ContainsRune, + "excludes": Excludes, + "excludesall": ExcludesAll, + "excludesrune": ExcludesRune, + "isbn": IsISBN, + "isbn10": IsISBN10, + "isbn13": IsISBN13, + "uuid": IsUUID, + "uuid3": IsUUID3, + "uuid4": IsUUID4, + "uuid5": IsUUID5, + "ascii": IsASCII, + "printascii": IsPrintableASCII, + "multibyte": HasMultiByteCharacter, + "datauri": IsDataURI, + "latitude": IsLatitude, + "longitude": IsLongitude, + "ssn": IsSSN, + "ipv4": IsIPv4, + "ipv6": IsIPv6, + "ip": IsIP, + "cidrv4": IsCIDRv4, + "cidrv6": IsCIDRv6, + "cidr": IsCIDR, + "tcp4_addr": IsTCP4AddrResolvable, + "tcp6_addr": IsTCP6AddrResolvable, + "tcp_addr": IsTCPAddrResolvable, + "udp4_addr": IsUDP4AddrResolvable, + "udp6_addr": IsUDP6AddrResolvable, + "udp_addr": IsUDPAddrResolvable, + "ip4_addr": IsIP4AddrResolvable, + "ip6_addr": IsIP6AddrResolvable, + "ip_addr": IsIPAddrResolvable, + "unix_addr": IsUnixAddrResolvable, + "mac": IsMAC, +} + +// IsMAC is the validation function for validating if the field's value is a valid MAC address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsMAC(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + _, err := net.ParseMAC(field.String()) + return err == nil +} + +// IsCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsCIDRv4(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + ip, _, err := net.ParseCIDR(field.String()) + + return err == nil && ip.To4() != nil +} + +// IsCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func IsCIDRv6(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + ip, _, err := net.ParseCIDR(field.String()) + + return err == nil && ip.To4() == nil +} + +// IsCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsCIDR(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + _, _, err := net.ParseCIDR(field.String()) + + return err == nil +} + +// IsIPv4 is the validation function for validating if a value is a valid v4 IP address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsIPv4(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + ip := net.ParseIP(field.String()) + + return ip != nil && ip.To4() != nil +} + +// IsIPv6 is the validation function for validating if the field's value is a valid v6 IP address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsIPv6(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + ip := net.ParseIP(field.String()) + + return ip != nil && ip.To4() == nil +} + +// IsIP is the validation function for validating if the field's value is a valid v4 or v6 IP address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsIP(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + ip := net.ParseIP(field.String()) + + return ip != nil +} + +// IsSSN is the validation function for validating if the field's value is a valid SSN. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsSSN(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + if field.Len() != 11 { + return false + } + + return sSNRegex.MatchString(field.String()) +} + +// IsLongitude is the validation function for validating if the field's value is a valid longitude coordinate. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsLongitude(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return longitudeRegex.MatchString(field.String()) +} + +// IsLatitude is the validation function for validating if the field's value is a valid latitude coordinate. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func IsLatitude(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return latitudeRegex.MatchString(field.String()) +} + +// IsDataURI is the validation function for validating if the field's value is a valid data URI. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsDataURI(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + uri := strings.SplitN(field.String(), ",", 2) + + if len(uri) != 2 { + return false + } + + if !dataURIRegex.MatchString(uri[0]) { + return false + } + + fld := reflect.ValueOf(uri[1]) + + return IsBase64(v, topStruct, currentStructOrField, fld, fld.Type(), fld.Kind(), param) +} + +// HasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func HasMultiByteCharacter(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + if field.Len() == 0 { + return true + } + + return multibyteRegex.MatchString(field.String()) +} + +// IsPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsPrintableASCII(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return printableASCIIRegex.MatchString(field.String()) +} + +// IsASCII is the validation function for validating if the field's value is a valid ASCII character. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsASCII(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return aSCIIRegex.MatchString(field.String()) +} + +// IsUUID5 is the validation function for validating if the field's value is a valid v5 UUID. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsUUID5(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return uUID5Regex.MatchString(field.String()) +} + +// IsUUID4 is the validation function for validating if the field's value is a valid v4 UUID. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsUUID4(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return uUID4Regex.MatchString(field.String()) +} + +// IsUUID3 is the validation function for validating if the field's value is a valid v3 UUID. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func IsUUID3(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return uUID3Regex.MatchString(field.String()) +} + +// IsUUID is the validation function for validating if the field's value is a valid UUID of any version. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsUUID(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return uUIDRegex.MatchString(field.String()) +} + +// IsISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsISBN(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return IsISBN10(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) || IsISBN13(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) +} + +// IsISBN13 is the validation function for validating if the field's value is a valid v13 ISBN. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsISBN13(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + s := strings.Replace(strings.Replace(field.String(), "-", "", 4), " ", "", 4) + + if !iSBN13Regex.MatchString(s) { + return false + } + + var checksum int32 + var i int32 + + factor := []int32{1, 3} + + for i = 0; i < 12; i++ { + checksum += factor[i%2] * int32(s[i]-'0') + } + + return (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0 +} + +// IsISBN10 is the validation function for validating if the field's value is a valid v10 ISBN. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsISBN10(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + s := strings.Replace(strings.Replace(field.String(), "-", "", 3), " ", "", 3) + + if !iSBN10Regex.MatchString(s) { + return false + } + + var checksum int32 + var i int32 + + for i = 0; i < 9; i++ { + checksum += (i + 1) * int32(s[i]-'0') + } + + if s[9] == 'X' { + checksum += 10 * 10 + } else { + checksum += 10 * int32(s[9]-'0') + } + + return checksum%11 == 0 +} + +// ExcludesRune is the validation function for validating that the field's value does not contain the rune specified within the param. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func ExcludesRune(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return !ContainsRune(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) +} + +// ExcludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func ExcludesAll(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return !ContainsAny(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) +} + +// Excludes is the validation function for validating that the field's value does not contain the text specified within the param. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func Excludes(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return !Contains(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) +} + +// ContainsRune is the validation function for validating that the field's value contains the rune specified within the param. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func ContainsRune(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + r, _ := utf8.DecodeRuneInString(param) + + return strings.ContainsRune(field.String(), r) +} + +// ContainsAny is the validation function for validating that the field's value contains any of the characters specified within the param. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func ContainsAny(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return strings.ContainsAny(field.String(), param) +} + +// Contains is the validation function for validating that the field's value contains the text specified within the param. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func Contains(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return strings.Contains(field.String(), param) +} + +// IsNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsNeField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param) + + if !ok || currentKind != fieldKind { + return true + } + + switch fieldKind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() != currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() != currentField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() != currentField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) != int64(currentField.Len()) + + case reflect.Struct: + + // Not Same underlying type i.e. 
struct and time + if fieldType != currentField.Type() { + return true + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return !fieldTime.Equal(t) + } + + } + + // default reflect.String: + return field.String() != currentField.String() +} + +// IsNe is the validation function for validating that the field's value does not equal the provided param value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsNe(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return !IsEq(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) +} + +// IsLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsLteCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + topField, topKind, ok := v.GetStructFieldOK(topStruct, param) + if !ok || topKind != fieldKind { + return false + } + + switch fieldKind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() <= topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() <= topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() <= topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) <= int64(topField.Len()) + + case reflect.Struct: + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.Before(topTime) || fieldTime.Equal(topTime) + } + } + + // default reflect.String: + return field.String() <= topField.String() +} + +// IsLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsLtCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + topField, topKind, ok := v.GetStructFieldOK(topStruct, param) + if !ok || topKind != fieldKind { + return false + } + + switch fieldKind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() < topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() < topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() < topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) < int64(topField.Len()) + + case reflect.Struct: + + // Not Same underlying type i.e. 
struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.Before(topTime) + } + } + + // default reflect.String: + return field.String() < topField.String() +} + +// IsGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsGteCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + topField, topKind, ok := v.GetStructFieldOK(topStruct, param) + if !ok || topKind != fieldKind { + return false + } + + switch fieldKind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() >= topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() >= topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() >= topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) >= int64(topField.Len()) + + case reflect.Struct: + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.After(topTime) || fieldTime.Equal(topTime) + } + } + + // default reflect.String: + return field.String() >= topField.String() +} + +// IsGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsGtCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + topField, topKind, ok := v.GetStructFieldOK(topStruct, param) + if !ok || topKind != fieldKind { + return false + } + + switch fieldKind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() > topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() > topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() > topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) > int64(topField.Len()) + + case reflect.Struct: + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.After(topTime) + } + } + + // default reflect.String: + return field.String() > topField.String() +} + +// IsNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func IsNeCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + topField, currentKind, ok := v.GetStructFieldOK(topStruct, param) + if !ok || currentKind != fieldKind { + return true + } + + switch fieldKind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return topField.Int() != field.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return topField.Uint() != field.Uint() + + case reflect.Float32, reflect.Float64: + return topField.Float() != field.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(topField.Len()) != int64(field.Len()) + + case reflect.Struct: + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return true + } + + if fieldType == timeType { + + t := field.Interface().(time.Time) + fieldTime := topField.Interface().(time.Time) + + return !fieldTime.Equal(t) + } + } + + // default reflect.String: + return topField.String() != field.String() +} + +// IsEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsEqCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + topField, topKind, ok := v.GetStructFieldOK(topStruct, param) + if !ok || topKind != fieldKind { + return false + } + + switch fieldKind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return topField.Int() == field.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return topField.Uint() == field.Uint() + + case reflect.Float32, reflect.Float64: + return topField.Float() == field.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(topField.Len()) == int64(field.Len()) + + case reflect.Struct: + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + t := field.Interface().(time.Time) + fieldTime := topField.Interface().(time.Time) + + return fieldTime.Equal(t) + } + } + + // default reflect.String: + return topField.String() == field.String() +} + +// IsEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func IsEqField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param) + if !ok || currentKind != fieldKind { + return false + } + + switch fieldKind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() == currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() == currentField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() == currentField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) == int64(currentField.Len()) + + case reflect.Struct: + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Equal(t) + } + + } + + // default reflect.String: + return field.String() == currentField.String() +} + +// IsEq is the validation function for validating if the current field's value is equal to the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsEq(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + switch fieldKind { + + case reflect.String: + return field.String() == param + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) == p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asInt(param) + + return field.Int() == p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() == p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() == p + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsBase64 is the validation function for validating if the current field's value is a valid base 64. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsBase64(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return base64Regex.MatchString(field.String()) +} + +// IsURI is the validation function for validating if the current field's value is a valid URI. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsURI(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + switch fieldKind { + + case reflect.String: + + s := field.String() + + // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 + // emulate browser and strip the '#' suffix prior to validation. 
see issue-#237 + if i := strings.Index(s, "#"); i > -1 { + s = s[:i] + } + + if s == blank { + return false + } + + _, err := url.ParseRequestURI(s) + + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsURL is the validation function for validating if the current field's value is a valid URL. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsURL(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + switch fieldKind { + + case reflect.String: + + var i int + s := field.String() + + // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 + // emulate browser and strip the '#' suffix prior to validation. see issue-#237 + if i = strings.Index(s, "#"); i > -1 { + s = s[:i] + } + + if s == blank { + return false + } + + url, err := url.ParseRequestURI(s) + + if err != nil || url.Scheme == blank { + return false + } + + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsEmail is the validation function for validating if the current field's value is a valid email address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsEmail(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return emailRegex.MatchString(field.String()) +} + +// IsHSLA is the validation function for validating if the current field's value is a valid HSLA color. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsHSLA(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return hslaRegex.MatchString(field.String()) +} + +// IsHSL is the validation function for validating if the current field's value is a valid HSL color. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsHSL(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return hslRegex.MatchString(field.String()) +} + +// IsRGBA is the validation function for validating if the current field's value is a valid RGBA color. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsRGBA(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return rgbaRegex.MatchString(field.String()) +} + +// IsRGB is the validation function for validating if the current field's value is a valid RGB color. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func IsRGB(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return rgbRegex.MatchString(field.String()) +} + +// IsHEXColor is the validation function for validating if the current field's value is a valid HEX color. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsHEXColor(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return hexcolorRegex.MatchString(field.String()) +} + +// IsHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsHexadecimal(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return hexadecimalRegex.MatchString(field.String()) +} + +// IsNumber is the validation function for validating if the current field's value is a valid number. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsNumber(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return numberRegex.MatchString(field.String()) +} + +// IsNumeric is the validation function for validating if the current field's value is a valid numeric value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsNumeric(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return numericRegex.MatchString(field.String()) +} + +// IsAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsAlphanum(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return alphaNumericRegex.MatchString(field.String()) +} + +// IsAlpha is the validation function for validating if the current field's value is a valid alpha value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsAlpha(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return alphaRegex.MatchString(field.String()) +} + +// HasValue is the validation function for validating if the current field's value is not the default static value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func HasValue(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + switch fieldKind { + case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func: + return !field.IsNil() + default: + return field.IsValid() && field.Interface() != reflect.Zero(fieldType).Interface() + } +} + +// IsGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsGteField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param) + if !ok || currentKind != fieldKind { + return false + } + + switch fieldKind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() >= currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() >= currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() >= currentField.Float() + + case reflect.Struct: + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.After(t) || fieldTime.Equal(t) + } + } + + // default reflect.String + return len(field.String()) >= len(currentField.String()) +} + +// IsGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsGtField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param) + if !ok || currentKind != fieldKind { + return false + } + + switch fieldKind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() > currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() > currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() > currentField.Float() + + case reflect.Struct: + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.After(t) + } + } + + // default reflect.String + return len(field.String()) > len(currentField.String()) +} + +// IsGte is the validation function for validating if the current field's value is greater than or equal to the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func IsGte(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + switch fieldKind { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) >= p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) >= p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asInt(param) + + return field.Int() >= p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() >= p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() >= p + + case reflect.Struct: + + if fieldType == timeType || fieldType == timePtrType { + + now := time.Now().UTC() + t := field.Interface().(time.Time) + + return t.After(now) || t.Equal(now) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsGt is the validation function for validating if the current field's value is greater than the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsGt(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + switch fieldKind { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) > p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) > p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asInt(param) + + return field.Int() > p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() > p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() > p + case reflect.Struct: + + if fieldType == timeType || fieldType == timePtrType { + + return field.Interface().(time.Time).After(time.Now().UTC()) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// HasLengthOf is the validation function for validating if the current field's value is equal to the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func HasLengthOf(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + switch fieldKind { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) == p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) == p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asInt(param) + + return field.Int() == p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() == p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() == p + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// HasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value. 
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func HasMinOf(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + return IsGte(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) +} + +// IsLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsLteField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param) + if !ok || currentKind != fieldKind { + return false + } + + switch fieldKind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() <= currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() <= currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() <= currentField.Float() + + case reflect.Struct: + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Before(t) || fieldTime.Equal(t) + } + } + + // default reflect.String + return len(field.String()) <= len(currentField.String()) +} + +// IsLtField is the validation function for validating if the current field's value is less than the field specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsLtField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param) + if !ok || currentKind != fieldKind { + return false + } + + switch fieldKind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() < currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() < currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() < currentField.Float() + + case reflect.Struct: + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Before(t) + } + } + + // default reflect.String + return len(field.String()) < len(currentField.String()) +} + +// IsLte is the validation function for validating if the current field's value is less than or equal to the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func IsLte(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + switch fieldKind { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) <= p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) <= p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asInt(param) + + return field.Int() <= p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() <= p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() <= p + + case reflect.Struct: + + if fieldType == timeType || fieldType == timePtrType { + + now := time.Now().UTC() + t := field.Interface().(time.Time) + + return t.Before(now) || t.Equal(now) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsLt is the validation function for validating if the current field's value is less than the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsLt(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + switch fieldKind { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) < p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) < p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asInt(param) + + return field.Int() < p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() < p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() < p + + case reflect.Struct: + + if fieldType == timeType || fieldType == timePtrType { + + return field.Interface().(time.Time).Before(time.Now().UTC()) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// HasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func HasMaxOf(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + return IsLte(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) +} + +// IsTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsTCP4AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + if !isIP4Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { + return false + } + + _, err := net.ResolveTCPAddr("tcp4", field.String()) + return err == nil +} + +// IsTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address. 
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsTCP6AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + if !isIP6Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { + return false + } + + _, err := net.ResolveTCPAddr("tcp6", field.String()) + return err == nil +} + +// IsTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsTCPAddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + if !isIP4Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) && + !isIP6Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { + return false + } + + _, err := net.ResolveTCPAddr("tcp", field.String()) + return err == nil +} + +// IsUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsUDP4AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + if !isIP4Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { + return false + } + + _, err := net.ResolveUDPAddr("udp4", field.String()) + return err == nil +} + +// IsUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsUDP6AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + if !isIP6Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { + return false + } + + _, err := net.ResolveUDPAddr("udp6", field.String()) + return err == nil +} + +// IsUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsUDPAddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + if !isIP4Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) && + !isIP6Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { + return false + } + + _, err := net.ResolveUDPAddr("udp", field.String()) + return err == nil +} + +// IsIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func IsIP4AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + if !IsIPv4(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { + return false + } + + _, err := net.ResolveIPAddr("ip4", field.String()) + return err == nil +} + +// IsIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsIP6AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + if !IsIPv6(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { + return false + } + + _, err := net.ResolveIPAddr("ip6", field.String()) + return err == nil +} + +// IsIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func IsIPAddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + if !IsIP(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { + return false + } + + _, err := net.ResolveIPAddr("ip", field.String()) + return err == nil +} + +// IsUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func IsUnixAddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + _, err := net.ResolveUnixAddr("unix", field.String()) + return err == nil +} + +func isIP4Addr(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + val := field.String() + + if idx := strings.LastIndex(val, ":"); idx != -1 { + val = val[0:idx] + } + + if !IsIPv4(v, topStruct, currentStructOrField, reflect.ValueOf(val), fieldType, fieldKind, param) { + return false + } + + return true +} + +func isIP6Addr(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + val := field.String() + + if idx := strings.LastIndex(val, ":"); idx != -1 { + if idx != 0 && val[idx-1:idx] == "]" { + val = val[1 : idx-1] + } + } + + if !IsIPv6(v, topStruct, currentStructOrField, reflect.ValueOf(val), fieldType, fieldKind, param) { + return false + } + + return true +} diff --git a/vendor/gopkg.in/go-playground/validator.v8/benchmarks_test.go b/vendor/gopkg.in/go-playground/validator.v8/benchmarks_test.go new file mode 100644 index 000000000..84db7431f --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/benchmarks_test.go @@ -0,0 +1,524 @@ +package validator + +import ( + sql "database/sql/driver" + "testing" + "time" +) + +func BenchmarkFieldSuccess(b *testing.B) { + + var s *string + tmp := "1" + s = &tmp + + for n := 0; n < b.N; n++ { + validate.Field(s, "len=1") + } +} + +func BenchmarkFieldFailure(b *testing.B) { + + var s *string + tmp := "12" + s = &tmp + + for n := 0; n < b.N; n++ { + validate.Field(s, "len=1") + } +} + +func BenchmarkFieldDiveSuccess(b *testing.B) { + + m := make([]*string, 3) + t1 := "val1" + t2 := "val2" + t3 := "val3" + + m[0] = &t1 + m[1] = &t2 + m[2] = &t3 + + for n := 0; n < b.N; n++ { + validate.Field(m, "required,dive,required") + } +} + +func BenchmarkFieldDiveFailure(b *testing.B) { + + m := make([]*string, 3) + t1 := "val1" + t2 := "" + t3 := "val3" + + m[0] = &t1 + m[1] = &t2 + m[2] = &t3 + + for n := 0; n < b.N; n++ { + validate.Field(m, "required,dive,required") + } +} + +func BenchmarkFieldCustomTypeSuccess(b *testing.B) { + + validate.RegisterCustomTypeFunc(ValidateValuerType, (*sql.Valuer)(nil), valuer{}) + + val := valuer{ + Name: "1", + } + + for n := 0; n < b.N; n++ { + validate.Field(val, "len=1") + } +} + +func BenchmarkFieldCustomTypeFailure(b *testing.B) { + + validate.RegisterCustomTypeFunc(ValidateValuerType, (*sql.Valuer)(nil), valuer{}) + + val := valuer{} + + for n := 0; n < b.N; n++ { + validate.Field(val, "len=1") + } +} + +func BenchmarkFieldOrTagSuccess(b *testing.B) { + + var s *string + tmp := "rgba(0,0,0,1)" + s = &tmp + + for n := 0; n < b.N; n++ { + validate.Field(s, "rgb|rgba") + } +} + +func BenchmarkFieldOrTagFailure(b *testing.B) { + + var s *string + tmp := "#000" + s = &tmp + + for n := 0; n < b.N; n++ { + validate.Field(s, "rgb|rgba") + } +} + +func BenchmarkStructLevelValidationSuccess(b *testing.B) { + + validate.RegisterStructValidation(StructValidationTestStructSuccess, TestStruct{}) + + tst := &TestStruct{ + String: "good value", + } + + for n := 0; n < b.N; n++ { + validate.Struct(tst) + } +} + +func BenchmarkStructLevelValidationFailure(b *testing.B) { + + validate.RegisterStructValidation(StructValidationTestStruct, 
TestStruct{}) + + tst := &TestStruct{ + String: "good value", + } + + for n := 0; n < b.N; n++ { + validate.Struct(tst) + } +} + +func BenchmarkStructSimpleCustomTypeSuccess(b *testing.B) { + + validate.RegisterCustomTypeFunc(ValidateValuerType, (*sql.Valuer)(nil), valuer{}) + + val := valuer{ + Name: "1", + } + + type Foo struct { + Valuer valuer `validate:"len=1"` + IntValue int `validate:"min=5,max=10"` + } + + validFoo := &Foo{Valuer: val, IntValue: 7} + + for n := 0; n < b.N; n++ { + validate.Struct(validFoo) + } +} + +func BenchmarkStructSimpleCustomTypeFailure(b *testing.B) { + + validate.RegisterCustomTypeFunc(ValidateValuerType, (*sql.Valuer)(nil), valuer{}) + + val := valuer{} + + type Foo struct { + Valuer valuer `validate:"len=1"` + IntValue int `validate:"min=5,max=10"` + } + + validFoo := &Foo{Valuer: val, IntValue: 3} + + for n := 0; n < b.N; n++ { + validate.Struct(validFoo) + } +} + +func BenchmarkStructPartialSuccess(b *testing.B) { + + type Test struct { + Name string `validate:"required"` + NickName string `validate:"required"` + } + + test := &Test{ + Name: "Joey Bloggs", + } + + for n := 0; n < b.N; n++ { + validate.StructPartial(test, "Name") + } +} + +func BenchmarkStructPartialFailure(b *testing.B) { + + type Test struct { + Name string `validate:"required"` + NickName string `validate:"required"` + } + + test := &Test{ + Name: "Joey Bloggs", + } + + for n := 0; n < b.N; n++ { + validate.StructPartial(test, "NickName") + } +} + +func BenchmarkStructExceptSuccess(b *testing.B) { + + type Test struct { + Name string `validate:"required"` + NickName string `validate:"required"` + } + + test := &Test{ + Name: "Joey Bloggs", + } + + for n := 0; n < b.N; n++ { + validate.StructPartial(test, "Nickname") + } +} + +func BenchmarkStructExceptFailure(b *testing.B) { + + type Test struct { + Name string `validate:"required"` + NickName string `validate:"required"` + } + + test := &Test{ + Name: "Joey Bloggs", + } + + for n := 0; n < b.N; n++ { + validate.StructPartial(test, "Name") + } +} + +func BenchmarkStructSimpleCrossFieldSuccess(b *testing.B) { + + type Test struct { + Start time.Time + End time.Time `validate:"gtfield=Start"` + } + + now := time.Now().UTC() + then := now.Add(time.Hour * 5) + + test := &Test{ + Start: now, + End: then, + } + + for n := 0; n < b.N; n++ { + validate.Struct(test) + } +} + +func BenchmarkStructSimpleCrossFieldFailure(b *testing.B) { + + type Test struct { + Start time.Time + End time.Time `validate:"gtfield=Start"` + } + + now := time.Now().UTC() + then := now.Add(time.Hour * -5) + + test := &Test{ + Start: now, + End: then, + } + + for n := 0; n < b.N; n++ { + validate.Struct(test) + } +} + +func BenchmarkStructSimpleCrossStructCrossFieldSuccess(b *testing.B) { + + type Inner struct { + Start time.Time + } + + type Outer struct { + Inner *Inner + CreatedAt time.Time `validate:"eqcsfield=Inner.Start"` + } + + now := time.Now().UTC() + + inner := &Inner{ + Start: now, + } + + outer := &Outer{ + Inner: inner, + CreatedAt: now, + } + + for n := 0; n < b.N; n++ { + validate.Struct(outer) + } +} + +func BenchmarkStructSimpleCrossStructCrossFieldFailure(b *testing.B) { + + type Inner struct { + Start time.Time + } + + type Outer struct { + Inner *Inner + CreatedAt time.Time `validate:"eqcsfield=Inner.Start"` + } + + now := time.Now().UTC() + then := now.Add(time.Hour * 5) + + inner := &Inner{ + Start: then, + } + + outer := &Outer{ + Inner: inner, + CreatedAt: now, + } + + for n := 0; n < b.N; n++ { + validate.Struct(outer) + } +} + +func 
BenchmarkStructSimpleSuccess(b *testing.B) { + + type Foo struct { + StringValue string `validate:"min=5,max=10"` + IntValue int `validate:"min=5,max=10"` + } + + validFoo := &Foo{StringValue: "Foobar", IntValue: 7} + + for n := 0; n < b.N; n++ { + validate.Struct(validFoo) + } +} + +func BenchmarkStructSimpleFailure(b *testing.B) { + + type Foo struct { + StringValue string `validate:"min=5,max=10"` + IntValue int `validate:"min=5,max=10"` + } + + invalidFoo := &Foo{StringValue: "Fo", IntValue: 3} + + for n := 0; n < b.N; n++ { + validate.Struct(invalidFoo) + } +} + +func BenchmarkStructSimpleSuccessParallel(b *testing.B) { + + type Foo struct { + StringValue string `validate:"min=5,max=10"` + IntValue int `validate:"min=5,max=10"` + } + + validFoo := &Foo{StringValue: "Foobar", IntValue: 7} + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + validate.Struct(validFoo) + } + }) +} + +func BenchmarkStructSimpleFailureParallel(b *testing.B) { + + type Foo struct { + StringValue string `validate:"min=5,max=10"` + IntValue int `validate:"min=5,max=10"` + } + + invalidFoo := &Foo{StringValue: "Fo", IntValue: 3} + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + validate.Struct(invalidFoo) + } + }) +} + +func BenchmarkStructComplexSuccess(b *testing.B) { + + tSuccess := &TestString{ + Required: "Required", + Len: "length==10", + Min: "min=1", + Max: "1234567890", + MinMax: "12345", + Lt: "012345678", + Lte: "0123456789", + Gt: "01234567890", + Gte: "0123456789", + OmitEmpty: "", + Sub: &SubTest{ + Test: "1", + }, + SubIgnore: &SubTest{ + Test: "", + }, + Anonymous: struct { + A string `validate:"required"` + }{ + A: "1", + }, + Iface: &Impl{ + F: "123", + }, + } + + for n := 0; n < b.N; n++ { + validate.Struct(tSuccess) + } +} + +func BenchmarkStructComplexFailure(b *testing.B) { + + tFail := &TestString{ + Required: "", + Len: "", + Min: "", + Max: "12345678901", + MinMax: "", + Lt: "0123456789", + Lte: "01234567890", + Gt: "1", + Gte: "1", + OmitEmpty: "12345678901", + Sub: &SubTest{ + Test: "", + }, + Anonymous: struct { + A string `validate:"required"` + }{ + A: "", + }, + Iface: &Impl{ + F: "12", + }, + } + + for n := 0; n < b.N; n++ { + validate.Struct(tFail) + } +} + +func BenchmarkStructComplexSuccessParallel(b *testing.B) { + + tSuccess := &TestString{ + Required: "Required", + Len: "length==10", + Min: "min=1", + Max: "1234567890", + MinMax: "12345", + Lt: "012345678", + Lte: "0123456789", + Gt: "01234567890", + Gte: "0123456789", + OmitEmpty: "", + Sub: &SubTest{ + Test: "1", + }, + SubIgnore: &SubTest{ + Test: "", + }, + Anonymous: struct { + A string `validate:"required"` + }{ + A: "1", + }, + Iface: &Impl{ + F: "123", + }, + } + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + validate.Struct(tSuccess) + } + }) +} + +func BenchmarkStructComplexFailureParallel(b *testing.B) { + + tFail := &TestString{ + Required: "", + Len: "", + Min: "", + Max: "12345678901", + MinMax: "", + Lt: "0123456789", + Lte: "01234567890", + Gt: "1", + Gte: "1", + OmitEmpty: "12345678901", + Sub: &SubTest{ + Test: "", + }, + Anonymous: struct { + A string `validate:"required"` + }{ + A: "", + }, + Iface: &Impl{ + F: "12", + }, + } + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + validate.Struct(tFail) + } + }) +} diff --git a/vendor/gopkg.in/go-playground/validator.v8/cache.go b/vendor/gopkg.in/go-playground/validator.v8/cache.go new file mode 100644 index 000000000..76670c1dd --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/cache.go @@ -0,0 +1,263 @@ 
+package validator + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type tagType uint8 + +const ( + typeDefault tagType = iota + typeOmitEmpty + typeNoStructLevel + typeStructOnly + typeDive + typeOr + typeExists +) + +type structCache struct { + lock sync.Mutex + m atomic.Value // map[reflect.Type]*cStruct +} + +func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) { + c, found = sc.m.Load().(map[reflect.Type]*cStruct)[key] + return +} + +func (sc *structCache) Set(key reflect.Type, value *cStruct) { + + m := sc.m.Load().(map[reflect.Type]*cStruct) + + nm := make(map[reflect.Type]*cStruct, len(m)+1) + for k, v := range m { + nm[k] = v + } + nm[key] = value + sc.m.Store(nm) +} + +type tagCache struct { + lock sync.Mutex + m atomic.Value // map[string]*cTag +} + +func (tc *tagCache) Get(key string) (c *cTag, found bool) { + c, found = tc.m.Load().(map[string]*cTag)[key] + return +} + +func (tc *tagCache) Set(key string, value *cTag) { + + m := tc.m.Load().(map[string]*cTag) + + nm := make(map[string]*cTag, len(m)+1) + for k, v := range m { + nm[k] = v + } + nm[key] = value + tc.m.Store(nm) +} + +type cStruct struct { + Name string + fields map[int]*cField + fn StructLevelFunc +} + +type cField struct { + Idx int + Name string + AltName string + cTags *cTag +} + +type cTag struct { + tag string + aliasTag string + actualAliasTag string + param string + hasAlias bool + typeof tagType + hasTag bool + fn Func + next *cTag +} + +func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStruct { + + v.structCache.lock.Lock() + defer v.structCache.lock.Unlock() // leave as defer! because if inner panics, it will never get unlocked otherwise! + + typ := current.Type() + + // could have been multiple trying to access, but once first is done this ensures struct + // isn't parsed again. + cs, ok := v.structCache.Get(typ) + if ok { + return cs + } + + cs = &cStruct{Name: sName, fields: make(map[int]*cField), fn: v.structLevelFuncs[typ]} + + numFields := current.NumField() + + var ctag *cTag + var fld reflect.StructField + var tag string + var customName string + + for i := 0; i < numFields; i++ { + + fld = typ.Field(i) + + if !fld.Anonymous && fld.PkgPath != blank { + continue + } + + tag = fld.Tag.Get(v.tagName) + + if tag == skipValidationTag { + continue + } + + customName = fld.Name + + if v.fieldNameTag != blank { + + name := strings.SplitN(fld.Tag.Get(v.fieldNameTag), ",", 2)[0] + + // dash check is for json "-" (aka skipValidationTag) means don't output in json + if name != "" && name != skipValidationTag { + customName = name + } + } + + // NOTE: cannot use shared tag cache, because tags may be equal, but things like alias may be different + // and so only struct level caching can be used instead of combined with Field tag caching + + if len(tag) > 0 { + ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, blank, false) + } else { + // even if field doesn't have validations need cTag for traversing to potential inner/nested + // elements of the field. 
+ ctag = new(cTag) + } + + cs.fields[i] = &cField{Idx: i, Name: fld.Name, AltName: customName, cTags: ctag} + } + + v.structCache.Set(typ, cs) + + return cs +} + +func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) { + + var t string + var ok bool + noAlias := len(alias) == 0 + tags := strings.Split(tag, tagSeparator) + + for i := 0; i < len(tags); i++ { + + t = tags[i] + + if noAlias { + alias = t + } + + if v.hasAliasValidators { + // check map for alias and process new tags, otherwise process as usual + if tagsVal, found := v.aliasValidators[t]; found { + + if i == 0 { + firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) + } else { + next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) + current.next, current = next, curr + + } + + continue + } + } + + if i == 0 { + current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true} + firstCtag = current + } else { + current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true} + current = current.next + } + + switch t { + + case diveTag: + current.typeof = typeDive + continue + + case omitempty: + current.typeof = typeOmitEmpty + continue + + case structOnlyTag: + current.typeof = typeStructOnly + continue + + case noStructLevelTag: + current.typeof = typeNoStructLevel + continue + + case existsTag: + current.typeof = typeExists + continue + + default: + + // if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C" + orVals := strings.Split(t, orSeparator) + + for j := 0; j < len(orVals); j++ { + + vals := strings.SplitN(orVals[j], tagKeySeparator, 2) + + if noAlias { + alias = vals[0] + current.aliasTag = alias + } else { + current.actualAliasTag = t + } + + if j > 0 { + current.next = &cTag{aliasTag: alias, actualAliasTag: current.actualAliasTag, hasAlias: hasAlias, hasTag: true} + current = current.next + } + + current.tag = vals[0] + if len(current.tag) == 0 { + panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName))) + } + + if current.fn, ok = v.validationFuncs[current.tag]; !ok { + panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, fieldName))) + } + + if len(orVals) > 1 { + current.typeof = typeOr + } + + if len(vals) > 1 { + current.param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1) + } + } + } + } + + return +} diff --git a/vendor/gopkg.in/go-playground/validator.v8/doc.go b/vendor/gopkg.in/go-playground/validator.v8/doc.go new file mode 100644 index 000000000..fdede2c4c --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/doc.go @@ -0,0 +1,852 @@ +/* +Package validator implements value validations for structs and individual fields +based on tags. + +It can also handle Cross-Field and Cross-Struct validation for nested structs +and has the ability to dive into arrays and maps of any type. + +Why not a better error message? +Because this library intends for you to handle your own error messages. + +Why should I handle my own errors? +Many reasons. We built an internationalized application and needed to know the +field, and what validation failed so we could provide a localized error. 
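+For example: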
+ + if fieldErr.Field == "Name" { + switch fieldErr.ErrorTag + case "required": + return "Translated string based on field + error" + default: + return "Translated string based on field" + } + + +Validation Functions Return Type error + +Doing things this way is actually the way the standard library does, see the +file.Open method here: + + https://golang.org/pkg/os/#Open. + +The authors return type "error" to avoid the issue discussed in the following, +where err is always != nil: + + http://stackoverflow.com/a/29138676/3158232 + https://github.com/go-playground/validator/issues/134 + +Validator only returns nil or ValidationErrors as type error; so, in your code +all you need to do is check if the error returned is not nil, and if it's not +type cast it to type ValidationErrors like so err.(validator.ValidationErrors). + +Custom Functions + +Custom functions can be added. Example: + + // Structure + func customFunc(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + if whatever { + return false + } + + return true + } + + validate.RegisterValidation("custom tag name", customFunc) + // NOTES: using the same tag name as an existing function + // will overwrite the existing one + +Cross-Field Validation + +Cross-Field Validation can be done via the following tags: + - eqfield + - nefield + - gtfield + - gtefield + - ltfield + - ltefield + - eqcsfield + - necsfield + - gtcsfield + - ftecsfield + - ltcsfield + - ltecsfield + +If, however, some custom cross-field validation is required, it can be done +using a custom validation. + +Why not just have cross-fields validation tags (i.e. only eqcsfield and not +eqfield)? + +The reason is efficiency. If you want to check a field within the same struct +"eqfield" only has to find the field on the same struct (1 level). But, if we +used "eqcsfield" it could be multiple levels down. Example: + + type Inner struct { + StartDate time.Time + } + + type Outer struct { + InnerStructField *Inner + CreatedAt time.Time `validate:"ltecsfield=InnerStructField.StartDate"` + } + + now := time.Now() + + inner := &Inner{ + StartDate: now, + } + + outer := &Outer{ + InnerStructField: inner, + CreatedAt: now, + } + + errs := validate.Struct(outer) + + // NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed + // into the function + // when calling validate.FieldWithValue(val, field, tag) val will be + // whatever you pass, struct, field... + // when calling validate.Field(field, tag) val will be nil + +Multiple Validators + +Multiple validators on a field will process in the order defined. Example: + + type Test struct { + Field `validate:"max=10,min=1"` + } + + // max will be checked then min + +Bad Validator definitions are not handled by the library. Example: + + type Test struct { + Field `validate:"min=10,max=0"` + } + + // this definition of min max will never succeed + +Using Validator Tags + +Baked In Cross-Field validation only compares fields on the same struct. +If Cross-Field + Cross-Struct validation is needed you should implement your +own custom validator. + +Comma (",") is the default separator of validation tags. If you wish to +have a comma included within the parameter (i.e. excludesall=,) you will need to +use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma, +so the above will become excludesall=0x2C. + + type Test struct { + Field `validate:"excludesall=,"` // BAD! 
Do not include a comma.
+      Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation.
+   }
+
+Pipe ("|") is the 'or' separator for validation tags. If you wish to
+have a pipe included within the parameter (i.e. excludesall=|) you will need to
+use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe,
+so the above will become excludesall=0x7C.
+
+   type Test struct {
+      Field `validate:"excludesall=|"`    // BAD! Do not include a pipe.
+      Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation.
+   }
+
+
+Baked In Validators and Tags
+
+Here is a list of the currently built-in validators:
+
+
+Skip Field
+
+Tells the validation to skip this struct field; this is particularly
+handy in ignoring embedded structs from being validated. (Usage: -)
+   Usage: -
+
+
+Or Operator
+
+This is the 'or' operator allowing multiple validators to be used and
+accepted. (Usage: rgb|rgba) <-- this would allow either rgb or rgba
+colors to be accepted. This can also be combined with 'and', for example
+(Usage: omitempty,rgb|rgba)
+
+   Usage: |
+
+StructOnly
+
+When a field that is a nested struct is encountered and contains this flag,
+any validation on the nested struct will be run, but none of the nested
+struct fields will be validated. This is useful if, inside your program,
+you know the struct will be valid but need to verify it has been assigned.
+NOTE: only "required" and "omitempty" can be used on a struct itself.
+
+   Usage: structonly
+
+NoStructLevel
+
+Same as the structonly tag, except that any struct level validations will not run.
+
+   Usage: nostructlevel
+
+Exists
+
+Is a special tag without a validation function attached. It is used when a field
+is a Pointer, Interface or Invalid and you wish to validate that it exists.
+Example: to ensure a bool exists, define the bool as a pointer and use exists;
+it will ensure there is a value. required could not be used here, as it would
+fail when the bool was false. exists fails if the value is a Pointer, Interface
+or Invalid and is nil.
+
+   Usage: exists
+
+Omit Empty
+
+Allows conditional validation: if a field is not set with
+a value (determined by the "required" validator) then other validations
+such as min or max won't run, but if a value is set, validation will run.
+
+   Usage: omitempty
+
+Dive
+
+This tells the validator to dive into a slice, array or map and validate that
+level of the slice, array or map with the validation tags that follow.
+Multidimensional nesting is also supported; each level you wish to dive into
+will require another dive tag.
+
+   Usage: dive
+
+Example #1
+
+   [][]string with validation tag "gt=0,dive,len=1,dive,required"
+   // gt=0 will be applied to []
+   // len=1 will be applied to []string
+   // required will be applied to string
+
+Example #2
+
+   [][]string with validation tag "gt=0,dive,dive,required"
+   // gt=0 will be applied to []
+   // []string will be spared validation
+   // required will be applied to string
+
+Required
+
+This validates that the value is not the data type's default zero value.
+For numbers it ensures the value is not zero. For strings it ensures the value is
+not "". For slices, maps, pointers, interfaces, channels and functions
+it ensures the value is not nil.
+
+   Usage: required
+
+Length
+
+For numbers, len will ensure that the value is
+equal to the parameter given. For strings, it checks that
+the string length is exactly that number of characters. For slices,
+arrays, and maps, validates the number of items.
+
+   Usage: len=10
+
+Maximum
+
+For numbers, max will ensure that the value is
+less than or equal to the parameter given. For strings, it checks
+that the string length is at most that number of characters. For
+slices, arrays, and maps, validates the number of items.
+
+   Usage: max=10
+
+Minimum
+
+For numbers, min will ensure that the value is
+greater than or equal to the parameter given. For strings, it checks that
+the string length is at least that number of characters. For slices,
+arrays, and maps, validates the number of items.
+
+   Usage: min=10
+
+Equals
+
+For strings & numbers, eq will ensure that the value is
+equal to the parameter given. For slices, arrays, and maps,
+validates the number of items.
+
+   Usage: eq=10
+
+Not Equal
+
+For strings & numbers, ne will ensure that the value is not
+equal to the parameter given. For slices, arrays, and maps,
+validates the number of items.
+
+   Usage: ne=10
+
+Greater Than
+
+For numbers, this will ensure that the value is greater than the
+parameter given. For strings, it checks that the string length
+is greater than that number of characters. For slices, arrays
+and maps it validates the number of items.
+
+Example #1
+
+   Usage: gt=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is greater than time.Now.UTC().
+
+   Usage: gt
+
+Greater Than or Equal
+
+Same as 'min' above. Kept both to make terminology with 'len' easier.
+
+Example #1
+
+   Usage: gte=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is greater than or equal to time.Now.UTC().
+
+   Usage: gte
+
+Less Than
+
+For numbers, this will ensure that the value is less than the parameter given.
+For strings, it checks that the string length is less than that number of
+characters. For slices, arrays, and maps it validates the number of items.
+
+Example #1
+
+   Usage: lt=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is less than time.Now.UTC().
+
+   Usage: lt
+
+Less Than or Equal
+
+Same as 'max' above. Kept both to make terminology with 'len' easier.
+
+Example #1
+
+   Usage: lte=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is less than or equal to time.Now.UTC().
+
+   Usage: lte
+
+Field Equals Another Field
+
+This will validate the field value against another field's value, either within
+a struct or in a passed-in field.
+
+Example #1:
+
+   // Validation on Password field using:
+   Usage: eqfield=ConfirmPassword
+
+Example #2:
+
+   // Validating by field:
+   validate.FieldWithValue(password, confirmpassword, "eqfield")
+
+Field Equals Another Field (relative)
+
+This does the same as eqfield except that it validates the field provided relative
+to the top level struct.
+
+   Usage: eqcsfield=InnerStructField.Field
+
+Field Does Not Equal Another Field
+
+This will validate the field value against another field's value, either within
+a struct or in a passed-in field.
+
+Examples:
+
+   // Confirm two colors are not the same:
+   //
+   // Validation on Color field:
+   Usage: nefield=Color2
+
+   // Validating by field:
+   validate.FieldWithValue(color1, color2, "nefield")
+
+Field Does Not Equal Another Field (relative)
+
+This does the same as nefield except that it validates the field provided
+relative to the top level struct.
+
+   Usage: necsfield=InnerStructField.Field
+
+Field Greater Than Another Field
+
+Only valid for Numbers and time.Time types, this will validate the field value
+against another field's value, either within a struct or in a passed-in field.
+Usage examples below are for validating a Start and End date:
+
+Example #1:
+
+   // Validation on End field using validate.Struct():
+   Usage: gtfield=Start
+
+Example #2:
+
+   // Validating by field:
+   validate.FieldWithValue(start, end, "gtfield")
+
+
+Field Greater Than Another Relative Field
+
+This does the same as gtfield except that it validates the field provided
+relative to the top level struct.
+
+   Usage: gtcsfield=InnerStructField.Field
+
+Field Greater Than or Equal To Another Field
+
+Only valid for Numbers and time.Time types, this will validate the field value
+against another field's value, either within a struct or in a passed-in field.
+Usage examples below are for validating a Start and End date:
+
+Example #1:
+
+   // Validation on End field using validate.Struct():
+   Usage: gtefield=Start
+
+Example #2:
+
+   // Validating by field:
+   validate.FieldWithValue(start, end, "gtefield")
+
+Field Greater Than or Equal To Another Relative Field
+
+This does the same as gtefield except that it validates the field provided relative
+to the top level struct.
+
+   Usage: gtecsfield=InnerStructField.Field
+
+Less Than Another Field
+
+Only valid for Numbers and time.Time types, this will validate the field value
+against another field's value, either within a struct or in a passed-in field.
+Usage examples below are for validating a Start and End date:
+
+Example #1:
+
+   // Validation on End field using validate.Struct():
+   Usage: ltfield=Start
+
+Example #2:
+
+   // Validating by field:
+   validate.FieldWithValue(start, end, "ltfield")
+
+Less Than Another Relative Field
+
+This does the same as ltfield except that it validates the field provided relative
+to the top level struct.
+
+   Usage: ltcsfield=InnerStructField.Field
+
+Less Than or Equal To Another Field
+
+Only valid for Numbers and time.Time types, this will validate the field value
+against another field's value, either within a struct or in a passed-in field.
+Usage examples below are for validating a Start and End date:
+
+Example #1:
+
+   // Validation on End field using validate.Struct():
+   Usage: ltefield=Start
+
+Example #2:
+
+   // Validating by field:
+   validate.FieldWithValue(start, end, "ltefield")
+
+Less Than or Equal To Another Relative Field
+
+This does the same as ltefield except that it validates the field provided relative
+to the top level struct.
+
+   Usage: ltecsfield=InnerStructField.Field
+
+Alpha Only
+
+This validates that a string value contains alpha characters only.
+
+   Usage: alpha
+
+Alphanumeric
+
+This validates that a string value contains alphanumeric characters only.
+
+   Usage: alphanum
+
+Numeric
+
+This validates that a string value contains a basic numeric value;
+basic excludes exponents etc.
+
+   Usage: numeric
+
+Hexadecimal String
+
+This validates that a string value contains a valid hexadecimal.
+
+   Usage: hexadecimal
+
+Hexcolor String
+
+This validates that a string value contains a valid hex color, including
+the leading hash sign (#).
+
+   Usage: hexcolor
+
+RGB String
+
+This validates that a string value contains a valid rgb color.
+
+   Usage: rgb
+
+RGBA String
+
+This validates that a string value contains a valid rgba color.
+
+   Usage: rgba
+
+HSL String
+
+This validates that a string value contains a valid hsl color.
+
+   Usage: hsl
+
+HSLA String
+
+This validates that a string value contains a valid hsla color.
+
+   Usage: hsla
+
+E-mail String
+
+This validates that a string value contains a valid email address.
+This may not conform to every possibility of any RFC standard, but neither
+does any email provider accept all possibilities.
+
+   Usage: email
+
+URL String
+
+This validates that a string value contains a valid URL.
+It will accept any URL that Go's request URI parsing accepts, but the value
+must contain a scheme, for example http:// or rtmp://.
+
+   Usage: url
+
+URI String
+
+This validates that a string value contains a valid URI.
+It will accept any URI that Go's request URI parsing accepts.
+
+   Usage: uri
+
+Base64 String
+
+This validates that a string value contains a valid base64 value.
+Although an empty string is valid base64, this will report an empty string
+as an error; if you wish to accept an empty string as valid you can use
+this with the omitempty tag.
+
+   Usage: base64
+
+Contains
+
+This validates that a string value contains the substring value.
+
+   Usage: contains=@
+
+Contains Any
+
+This validates that a string value contains any Unicode code points
+in the substring value.
+
+   Usage: containsany=!@#?
+
+Contains Rune
+
+This validates that a string value contains the supplied rune value.
+
+   Usage: containsrune=@
+
+Excludes
+
+This validates that a string value does not contain the substring value.
+
+   Usage: excludes=@
+
+Excludes All
+
+This validates that a string value does not contain any Unicode code
+points in the substring value.
+
+   Usage: excludesall=!@#?
+
+Excludes Rune
+
+This validates that a string value does not contain the supplied rune value.
+
+   Usage: excludesrune=@
+
+International Standard Book Number
+
+This validates that a string value contains a valid isbn10 or isbn13 value.
+
+   Usage: isbn
+
+International Standard Book Number 10
+
+This validates that a string value contains a valid isbn10 value.
+
+   Usage: isbn10
+
+International Standard Book Number 13
+
+This validates that a string value contains a valid isbn13 value.
+
+   Usage: isbn13
+
+
+Universally Unique Identifier UUID
+
+This validates that a string value contains a valid UUID.
+
+   Usage: uuid
+
+Universally Unique Identifier UUID v3
+
+This validates that a string value contains a valid version 3 UUID.
+
+   Usage: uuid3
+
+Universally Unique Identifier UUID v4
+
+This validates that a string value contains a valid version 4 UUID.
+
+   Usage: uuid4
+
+Universally Unique Identifier UUID v5
+
+This validates that a string value contains a valid version 5 UUID.
+
+   Usage: uuid5
+
+ASCII
+
+This validates that a string value contains only ASCII characters.
+NOTE: if the string is blank, this validates as true.
+
+   Usage: ascii
+
+Printable ASCII
+
+This validates that a string value contains only printable ASCII characters.
+NOTE: if the string is blank, this validates as true.
+
+   Usage: asciiprint
+
+Multi-Byte Characters
+
+This validates that a string value contains one or more multibyte characters.
+NOTE: if the string is blank, this validates as true.
+
+   Usage: multibyte
+
+Data URL
+
+This validates that a string value contains a valid DataURI.
+NOTE: this will also validate that the data portion is valid base64.
+
+   Usage: datauri
+
+Latitude
+
+This validates that a string value contains a valid latitude.
+
+   Usage: latitude
+
+Longitude
+
+This validates that a string value contains a valid longitude.
+
+   Usage: longitude
+
+Social Security Number SSN
+
+This validates that a string value contains a valid U.S. Social Security Number.
+
+   Usage: ssn
+
+Internet Protocol Address IP
+
+This validates that a string value contains a valid IP address.
+
+   Usage: ip
+
+Internet Protocol Address IPv4
+
+This validates that a string value contains a valid v4 IP address.
+
+   Usage: ipv4
+
+Internet Protocol Address IPv6
+
+This validates that a string value contains a valid v6 IP address.
+
+   Usage: ipv6
+
+Classless Inter-Domain Routing CIDR
+
+This validates that a string value contains a valid CIDR address.
+
+   Usage: cidr
+
+Classless Inter-Domain Routing CIDRv4
+
+This validates that a string value contains a valid v4 CIDR address.
+
+   Usage: cidrv4
+
+Classless Inter-Domain Routing CIDRv6
+
+This validates that a string value contains a valid v6 CIDR address.
+
+   Usage: cidrv6
+
+Transmission Control Protocol Address TCP
+
+This validates that a string value contains a valid resolvable TCP address.
+
+   Usage: tcp_addr
+
+Transmission Control Protocol Address TCPv4
+
+This validates that a string value contains a valid resolvable v4 TCP address.
+
+   Usage: tcp4_addr
+
+Transmission Control Protocol Address TCPv6
+
+This validates that a string value contains a valid resolvable v6 TCP address.
+
+   Usage: tcp6_addr
+
+User Datagram Protocol Address UDP
+
+This validates that a string value contains a valid resolvable UDP address.
+
+   Usage: udp_addr
+
+User Datagram Protocol Address UDPv4
+
+This validates that a string value contains a valid resolvable v4 UDP address.
+
+   Usage: udp4_addr
+
+User Datagram Protocol Address UDPv6
+
+This validates that a string value contains a valid resolvable v6 UDP address.
+
+   Usage: udp6_addr
+
+Internet Protocol Address IP
+
+This validates that a string value contains a valid resolvable IP address.
+
+   Usage: ip_addr
+
+Internet Protocol Address IPv4
+
+This validates that a string value contains a valid resolvable v4 IP address.
+
+   Usage: ip4_addr
+
+Internet Protocol Address IPv6
+
+This validates that a string value contains a valid resolvable v6 IP address.
+
+   Usage: ip6_addr
+
+Unix domain socket end point Address
+
+This validates that a string value contains a valid Unix address.
+
+   Usage: unix_addr
+
+Media Access Control Address MAC
+
+This validates that a string value contains a valid MAC address.
+
+   Usage: mac
+
+Note: See Go's ParseMAC for accepted formats and types:
+
+   http://golang.org/src/net/mac.go?s=866:918#L29
+
+Alias Validators and Tags
+
+NOTE: When returning an error, the tag returned in "FieldError" will be
+the alias tag unless the dive tag is part of the alias. Everything after the
+dive tag is not reported as the alias tag. Also, in that case the "ActualTag"
+will be the actual tag within the alias that failed.
+
+Here is a list of the current built-in alias tags:
+
+   "iscolor"
+      alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor)
+
+Validator notes:
+
+   regex
+      a regex validator won't be added because commas and = signs can be part
+      of a regex, which conflicts with the validation definitions. Although
+      workarounds can be made, they take away from using pure regexes.
+      Furthermore, while quick and dirty, the regexes become harder to
+      maintain and are not reusable, so it's as much a programming philosophy
+      as anything.
+
+      In place of this, new validator functions should be created; a regex can
+      be used within the validator function and even be precompiled for better
+      efficiency within regexes.go.
+
+      And the best reason: you can submit a pull request and we can keep on
+      adding to the validation library of this package!
+
+Panics
+
+This package panics when bad input is provided; this is by design, as bad code
+like that should not make it to production.
+ + type Test struct { + TestField string `validate:"nonexistantfunction=1"` + } + + t := &Test{ + TestField: "Test" + } + + validate.Struct(t) // this will panic +*/ +package validator diff --git a/vendor/gopkg.in/go-playground/validator.v8/examples/custom/custom.go b/vendor/gopkg.in/go-playground/validator.v8/examples/custom/custom.go new file mode 100644 index 000000000..ee14bd2f8 --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/examples/custom/custom.go @@ -0,0 +1,45 @@ +package main + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + + "gopkg.in/go-playground/validator.v8" +) + +// DbBackedUser User struct +type DbBackedUser struct { + Name sql.NullString `validate:"required"` + Age sql.NullInt64 `validate:"required"` +} + +func main() { + + config := &validator.Config{TagName: "validate"} + + validate := validator.New(config) + + // register all sql.Null* types to use the ValidateValuer CustomTypeFunc + validate.RegisterCustomTypeFunc(ValidateValuer, sql.NullString{}, sql.NullInt64{}, sql.NullBool{}, sql.NullFloat64{}) + + x := DbBackedUser{Name: sql.NullString{String: "", Valid: true}, Age: sql.NullInt64{Int64: 0, Valid: false}} + errs := validate.Struct(x) + + if errs != nil { + fmt.Printf("Errs:\n%+v\n", errs) + } +} + +// ValidateValuer implements validator.CustomTypeFunc +func ValidateValuer(field reflect.Value) interface{} { + if valuer, ok := field.Interface().(driver.Valuer); ok { + val, err := valuer.Value() + if err == nil { + return val + } + // handle the error how you want + } + return nil +} diff --git a/vendor/gopkg.in/go-playground/validator.v8/examples/simple/simple.go b/vendor/gopkg.in/go-playground/validator.v8/examples/simple/simple.go new file mode 100644 index 000000000..d16cc830a --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/examples/simple/simple.go @@ -0,0 +1,155 @@ +package main + +import ( + "errors" + "fmt" + "reflect" + + sql "database/sql/driver" + + "gopkg.in/go-playground/validator.v8" +) + +// User contains user information +type User struct { + FirstName string `validate:"required"` + LastName string `validate:"required"` + Age uint8 `validate:"gte=0,lte=130"` + Email string `validate:"required,email"` + FavouriteColor string `validate:"hexcolor|rgb|rgba"` + Addresses []*Address `validate:"required,dive,required"` // a person can have a home and cottage... 
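+	// note: "dive" means the trailing "required" is applied to each *Address element of the slice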
+} + +// Address houses a users address information +type Address struct { + Street string `validate:"required"` + City string `validate:"required"` + Planet string `validate:"required"` + Phone string `validate:"required"` +} + +var validate *validator.Validate + +func main() { + + config := &validator.Config{TagName: "validate"} + + validate = validator.New(config) + + validateStruct() + validateField() +} + +func validateStruct() { + + address := &Address{ + Street: "Eavesdown Docks", + Planet: "Persphone", + Phone: "none", + } + + user := &User{ + FirstName: "Badger", + LastName: "Smith", + Age: 135, + Email: "Badger.Smith@gmail.com", + FavouriteColor: "#000", + Addresses: []*Address{address}, + } + + // returns nil or ValidationErrors ( map[string]*FieldError ) + errs := validate.Struct(user) + + if errs != nil { + + fmt.Println(errs) // output: Key: "User.Age" Error:Field validation for "Age" failed on the "lte" tag + // Key: "User.Addresses[0].City" Error:Field validation for "City" failed on the "required" tag + err := errs.(validator.ValidationErrors)["User.Addresses[0].City"] + fmt.Println(err.Field) // output: City + fmt.Println(err.Tag) // output: required + fmt.Println(err.Kind) // output: string + fmt.Println(err.Type) // output: string + fmt.Println(err.Param) // output: + fmt.Println(err.Value) // output: + + // from here you can create your own error messages in whatever language you wish + return + } + + // save user to database +} + +func validateField() { + myEmail := "joeybloggs.gmail.com" + + errs := validate.Field(myEmail, "required,email") + + if errs != nil { + fmt.Println(errs) // output: Key: "" Error:Field validation for "" failed on the "email" tag + return + } + + // email ok, move on +} + +var validate2 *validator.Validate + +type valuer struct { + Name string +} + +func (v valuer) Value() (sql.Value, error) { + + if v.Name == "errorme" { + return nil, errors.New("some kind of error") + } + + if v.Name == "blankme" { + return "", nil + } + + if len(v.Name) == 0 { + return nil, nil + } + + return v.Name, nil +} + +// ValidateValuerType implements validator.CustomTypeFunc +func ValidateValuerType(field reflect.Value) interface{} { + if valuer, ok := field.Interface().(sql.Valuer); ok { + val, err := valuer.Value() + if err != nil { + // handle the error how you want + return nil + } + + return val + } + + return nil +} + +func main2() { + + config := &validator.Config{TagName: "validate"} + + validate2 = validator.New(config) + validate2.RegisterCustomTypeFunc(ValidateValuerType, (*sql.Valuer)(nil), valuer{}) + + validateCustomFieldType() +} + +func validateCustomFieldType() { + val := valuer{ + Name: "blankme", + } + + errs := validate2.Field(val, "required") + if errs != nil { + fmt.Println(errs) // output: Key: "" Error:Field validation for "" failed on the "required" tag + return + } + + // all ok +} diff --git a/vendor/gopkg.in/go-playground/validator.v8/examples/struct-level/struct_level.go b/vendor/gopkg.in/go-playground/validator.v8/examples/struct-level/struct_level.go new file mode 100644 index 000000000..92526c95b --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/examples/struct-level/struct_level.go @@ -0,0 +1,99 @@ +package main + +import ( + "fmt" + "reflect" + + "gopkg.in/go-playground/validator.v8" +) + +// User contains user information +type User struct { + FirstName string `json:"fname"` + LastName string `json:"lname"` + Age uint8 `validate:"gte=0,lte=130"` + Email string `validate:"required,email"` + FavouriteColor string 
`validate:"hexcolor|rgb|rgba"` + Addresses []*Address `validate:"required,dive,required"` // a person can have a home and cottage... +} + +// Address houses a users address information +type Address struct { + Street string `validate:"required"` + City string `validate:"required"` + Planet string `validate:"required"` + Phone string `validate:"required"` +} + +var validate *validator.Validate + +func main() { + + config := &validator.Config{TagName: "validate"} + + validate = validator.New(config) + validate.RegisterStructValidation(UserStructLevelValidation, User{}) + + validateStruct() +} + +// UserStructLevelValidation contains custom struct level validations that don't always +// make sense at the field validation level. For Example this function validates that either +// FirstName or LastName exist; could have done that with a custom field validation but then +// would have had to add it to both fields duplicating the logic + overhead, this way it's +// only validated once. +// +// NOTE: you may ask why wouldn't I just do this outside of validator, because doing this way +// hooks right into validator and you can combine with validation tags and still have a +// common error output format. +func UserStructLevelValidation(v *validator.Validate, structLevel *validator.StructLevel) { + + user := structLevel.CurrentStruct.Interface().(User) + + if len(user.FirstName) == 0 && len(user.LastName) == 0 { + structLevel.ReportError(reflect.ValueOf(user.FirstName), "FirstName", "fname", "fnameorlname") + structLevel.ReportError(reflect.ValueOf(user.LastName), "LastName", "lname", "fnameorlname") + } + + // plus can to more, even with different tag than "fnameorlname" +} + +func validateStruct() { + + address := &Address{ + Street: "Eavesdown Docks", + Planet: "Persphone", + Phone: "none", + City: "Unknown", + } + + user := &User{ + FirstName: "", + LastName: "", + Age: 45, + Email: "Badger.Smith@gmail.com", + FavouriteColor: "#000", + Addresses: []*Address{address}, + } + + // returns nil or ValidationErrors ( map[string]*FieldError ) + errs := validate.Struct(user) + + if errs != nil { + + fmt.Println(errs) // output: Key: 'User.LastName' Error:Field validation for 'LastName' failed on the 'fnameorlname' tag + // Key: 'User.FirstName' Error:Field validation for 'FirstName' failed on the 'fnameorlname' tag + err := errs.(validator.ValidationErrors)["User.FirstName"] + fmt.Println(err.Field) // output: FirstName + fmt.Println(err.Tag) // output: fnameorlname + fmt.Println(err.Kind) // output: string + fmt.Println(err.Type) // output: string + fmt.Println(err.Param) // output: + fmt.Println(err.Value) // output: + + // from here you can create your own error messages in whatever language you wish + return + } + + // save user to database +} diff --git a/vendor/gopkg.in/go-playground/validator.v8/examples_test.go b/vendor/gopkg.in/go-playground/validator.v8/examples_test.go new file mode 100644 index 000000000..fde22461a --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/examples_test.go @@ -0,0 +1,83 @@ +package validator_test + +import ( + "fmt" + + "gopkg.in/go-playground/validator.v8" +) + +func ExampleValidate_new() { + config := &validator.Config{TagName: "validate"} + + validator.New(config) +} + +func ExampleValidate_field() { + // This should be stored somewhere globally + var validate *validator.Validate + + config := &validator.Config{TagName: "validate"} + + validate = validator.New(config) + + i := 0 + errs := validate.Field(i, "gt=1,lte=10") + err := 
errs.(validator.ValidationErrors)[""] + fmt.Println(err.Field) + fmt.Println(err.Tag) + fmt.Println(err.Kind) // NOTE: Kind and Type can be different i.e. time Kind=struct and Type=time.Time + fmt.Println(err.Type) + fmt.Println(err.Param) + fmt.Println(err.Value) + //Output: + // + //gt + //int + //int + //1 + //0 +} + +func ExampleValidate_struct() { + // This should be stored somewhere globally + var validate *validator.Validate + + config := &validator.Config{TagName: "validate"} + + validate = validator.New(config) + + type ContactInformation struct { + Phone string `validate:"required"` + Street string `validate:"required"` + City string `validate:"required"` + } + + type User struct { + Name string `validate:"required,excludesall=!@#$%^&*()_+-=:;?/0x2C"` // 0x2C = comma (,) + Age int8 `validate:"required,gt=0,lt=150"` + Email string `validate:"email"` + ContactInformation []*ContactInformation + } + + contactInfo := &ContactInformation{ + Street: "26 Here Blvd.", + City: "Paradeso", + } + + user := &User{ + Name: "Joey Bloggs", + Age: 31, + Email: "joeybloggs@gmail.com", + ContactInformation: []*ContactInformation{contactInfo}, + } + + errs := validate.Struct(user) + for _, v := range errs.(validator.ValidationErrors) { + fmt.Println(v.Field) // Phone + fmt.Println(v.Tag) // required + //... and so forth + //Output: + //Phone + //required + } +} diff --git a/vendor/gopkg.in/go-playground/validator.v8/logo.png b/vendor/gopkg.in/go-playground/validator.v8/logo.png new file mode 100644 index 000000000..355000f52 Binary files /dev/null and b/vendor/gopkg.in/go-playground/validator.v8/logo.png differ diff --git a/vendor/gopkg.in/go-playground/validator.v8/regexes.go b/vendor/gopkg.in/go-playground/validator.v8/regexes.go new file mode 100644 index 000000000..83ae1982a --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/regexes.go @@ -0,0 +1,59 @@ +package validator + +import "regexp" + +const ( + alphaRegexString = "^[a-zA-Z]+$" + alphaNumericRegexString = "^[a-zA-Z0-9]+$" + numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$" + numberRegexString = "^[0-9]+$" + hexadecimalRegexString = "^[0-9a-fA-F]+$" + hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$" + rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" + hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$" + hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" + emailRegexString = 
"^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:\\(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22)))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$" + iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$" + uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + aSCIIRegexString = "^[\x00-\x7F]*$" + printableASCIIRegexString = "^[\x20-\x7E]*$" + multibyteRegexString = "[^\x00-\x7F]" + dataURIRegexString = "^data:.+\\/(.+);base64$" + latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + sSNRegexString = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` +) + +var ( + alphaRegex = regexp.MustCompile(alphaRegexString) + alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString) + numericRegex = regexp.MustCompile(numericRegexString) + numberRegex = regexp.MustCompile(numberRegexString) + hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString) + hexcolorRegex = regexp.MustCompile(hexcolorRegexString) + rgbRegex = regexp.MustCompile(rgbRegexString) + rgbaRegex = regexp.MustCompile(rgbaRegexString) + hslRegex = regexp.MustCompile(hslRegexString) + hslaRegex = regexp.MustCompile(hslaRegexString) + emailRegex = regexp.MustCompile(emailRegexString) + base64Regex = regexp.MustCompile(base64RegexString) + iSBN10Regex = regexp.MustCompile(iSBN10RegexString) + iSBN13Regex = regexp.MustCompile(iSBN13RegexString) + uUID3Regex = regexp.MustCompile(uUID3RegexString) + uUID4Regex = regexp.MustCompile(uUID4RegexString) + uUID5Regex = regexp.MustCompile(uUID5RegexString) + uUIDRegex = regexp.MustCompile(uUIDRegexString) + aSCIIRegex = regexp.MustCompile(aSCIIRegexString) + printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString) + multibyteRegex = regexp.MustCompile(multibyteRegexString) + dataURIRegex = regexp.MustCompile(dataURIRegexString) + latitudeRegex = regexp.MustCompile(latitudeRegexString) + longitudeRegex = 
regexp.MustCompile(longitudeRegexString) + sSNRegex = regexp.MustCompile(sSNRegexString) +) diff --git a/vendor/gopkg.in/go-playground/validator.v8/util.go b/vendor/gopkg.in/go-playground/validator.v8/util.go new file mode 100644 index 000000000..e81157909 --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/util.go @@ -0,0 +1,252 @@ +package validator + +import ( + "reflect" + "strconv" + "strings" +) + +const ( + blank = "" + namespaceSeparator = "." + leftBracket = "[" + rightBracket = "]" + restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}" + restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" + restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" +) + +var ( + restrictedTags = map[string]struct{}{ + diveTag: {}, + existsTag: {}, + structOnlyTag: {}, + omitempty: {}, + skipValidationTag: {}, + utf8HexComma: {}, + utf8Pipe: {}, + noStructLevelTag: {}, + } +) + +// ExtractType gets the actual underlying type of field value. +// It will dive into pointers, customTypes and return you the +// underlying value and it's kind. +// it is exposed for use within you Custom Functions +func (v *Validate) ExtractType(current reflect.Value) (reflect.Value, reflect.Kind) { + + val, k, _ := v.extractTypeInternal(current, false) + return val, k +} + +// only exists to not break backward compatibility, needed to return the third param for a bug fix internally +func (v *Validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) { + + switch current.Kind() { + case reflect.Ptr: + + nullable = true + + if current.IsNil() { + return current, reflect.Ptr, nullable + } + + return v.extractTypeInternal(current.Elem(), nullable) + + case reflect.Interface: + + nullable = true + + if current.IsNil() { + return current, reflect.Interface, nullable + } + + return v.extractTypeInternal(current.Elem(), nullable) + + case reflect.Invalid: + return current, reflect.Invalid, nullable + + default: + + if v.hasCustomFuncs { + + if fn, ok := v.customTypeFuncs[current.Type()]; ok { + return v.extractTypeInternal(reflect.ValueOf(fn(current)), nullable) + } + } + + return current, current.Kind(), nullable + } +} + +// GetStructFieldOK traverses a struct to retrieve a specific field denoted by the provided namespace and +// returns the field, field kind and whether is was successful in retrieving the field at all. +// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field +// could not be retrieved because it didn't exist. 
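+// The namespace is a "."/"[]" delimited path relative to the given value,
+// e.g. "Addresses[0].City" (illustrative example, not a field defined in this package).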
+func (v *Validate) GetStructFieldOK(current reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) { + + current, kind := v.ExtractType(current) + + if kind == reflect.Invalid { + return current, kind, false + } + + if namespace == blank { + return current, kind, true + } + + switch kind { + + case reflect.Ptr, reflect.Interface: + + return current, kind, false + + case reflect.Struct: + + typ := current.Type() + fld := namespace + ns := namespace + + if typ != timeType && typ != timePtrType { + + idx := strings.Index(namespace, namespaceSeparator) + + if idx != -1 { + fld = namespace[:idx] + ns = namespace[idx+1:] + } else { + ns = blank + } + + bracketIdx := strings.Index(fld, leftBracket) + if bracketIdx != -1 { + fld = fld[:bracketIdx] + + ns = namespace[bracketIdx:] + } + + current = current.FieldByName(fld) + + return v.GetStructFieldOK(current, ns) + } + + case reflect.Array, reflect.Slice: + idx := strings.Index(namespace, leftBracket) + idx2 := strings.Index(namespace, rightBracket) + + arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2]) + + if arrIdx >= current.Len() { + return current, kind, false + } + + startIdx := idx2 + 1 + + if startIdx < len(namespace) { + if namespace[startIdx:startIdx+1] == namespaceSeparator { + startIdx++ + } + } + + return v.GetStructFieldOK(current.Index(arrIdx), namespace[startIdx:]) + + case reflect.Map: + idx := strings.Index(namespace, leftBracket) + 1 + idx2 := strings.Index(namespace, rightBracket) + + endIdx := idx2 + + if endIdx+1 < len(namespace) { + if namespace[endIdx+1:endIdx+2] == namespaceSeparator { + endIdx++ + } + } + + key := namespace[idx:idx2] + + switch current.Type().Key().Kind() { + case reflect.Int: + i, _ := strconv.Atoi(key) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:]) + case reflect.Int8: + i, _ := strconv.ParseInt(key, 10, 8) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int8(i))), namespace[endIdx+1:]) + case reflect.Int16: + i, _ := strconv.ParseInt(key, 10, 16) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int16(i))), namespace[endIdx+1:]) + case reflect.Int32: + i, _ := strconv.ParseInt(key, 10, 32) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int32(i))), namespace[endIdx+1:]) + case reflect.Int64: + i, _ := strconv.ParseInt(key, 10, 64) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:]) + case reflect.Uint: + i, _ := strconv.ParseUint(key, 10, 0) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint(i))), namespace[endIdx+1:]) + case reflect.Uint8: + i, _ := strconv.ParseUint(key, 10, 8) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint8(i))), namespace[endIdx+1:]) + case reflect.Uint16: + i, _ := strconv.ParseUint(key, 10, 16) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint16(i))), namespace[endIdx+1:]) + case reflect.Uint32: + i, _ := strconv.ParseUint(key, 10, 32) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint32(i))), namespace[endIdx+1:]) + case reflect.Uint64: + i, _ := strconv.ParseUint(key, 10, 64) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:]) + case reflect.Float32: + f, _ := strconv.ParseFloat(key, 32) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(float32(f))), namespace[endIdx+1:]) + case reflect.Float64: + f, _ := strconv.ParseFloat(key, 64) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(f)), namespace[endIdx+1:]) + case reflect.Bool: + 
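+			// the key segment taken from the namespace is parsed into the map's key kind (here bool) before indexing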
b, _ := strconv.ParseBool(key) + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(b)), namespace[endIdx+1:]) + + // reflect.Type = string + default: + return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(key)), namespace[endIdx+1:]) + } + } + + // if got here there was more namespace, cannot go any deeper + panic("Invalid field namespace") +} + +// asInt returns the parameter as a int64 +// or panics if it can't convert +func asInt(param string) int64 { + + i, err := strconv.ParseInt(param, 0, 64) + panicIf(err) + + return i +} + +// asUint returns the parameter as a uint64 +// or panics if it can't convert +func asUint(param string) uint64 { + + i, err := strconv.ParseUint(param, 0, 64) + panicIf(err) + + return i +} + +// asFloat returns the parameter as a float64 +// or panics if it can't convert +func asFloat(param string) float64 { + + i, err := strconv.ParseFloat(param, 64) + panicIf(err) + + return i +} + +func panicIf(err error) { + if err != nil { + panic(err.Error()) + } +} diff --git a/vendor/gopkg.in/go-playground/validator.v8/validator.go b/vendor/gopkg.in/go-playground/validator.v8/validator.go new file mode 100644 index 000000000..2b1d7164d --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/validator.go @@ -0,0 +1,782 @@ +/** + * Package validator + * + * MISC: + * - anonymous structs - they don't have names so expect the Struct name within StructErrors to be blank + * + */ + +package validator + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "sync" + "time" +) + +const ( + utf8HexComma = "0x2C" + utf8Pipe = "0x7C" + tagSeparator = "," + orSeparator = "|" + tagKeySeparator = "=" + structOnlyTag = "structonly" + noStructLevelTag = "nostructlevel" + omitempty = "omitempty" + skipValidationTag = "-" + diveTag = "dive" + existsTag = "exists" + fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag" + arrayIndexFieldName = "%s" + leftBracket + "%d" + rightBracket + mapIndexFieldName = "%s" + leftBracket + "%v" + rightBracket + invalidValidation = "Invalid validation tag on field %s" + undefinedValidation = "Undefined validation function on field %s" + validatorNotInitialized = "Validator instance not initialized" + fieldNameRequired = "Field Name Required" + tagRequired = "Tag Required" +) + +var ( + timeType = reflect.TypeOf(time.Time{}) + timePtrType = reflect.TypeOf(&time.Time{}) + defaultCField = new(cField) +) + +// StructLevel contains all of the information and helper methods +// for reporting errors during struct level validation +type StructLevel struct { + TopStruct reflect.Value + CurrentStruct reflect.Value + errPrefix string + nsPrefix string + errs ValidationErrors + v *Validate +} + +// ReportValidationErrors accepts the key relative to the top level struct and validatin errors. +// Example: had a triple nested struct User, ContactInfo, Country and ran errs := validate.Struct(country) +// from within a User struct level validation would call this method like so: +// ReportValidationErrors("ContactInfo.", errs) +// NOTE: relativeKey can contain both the Field Relative and Custom name relative paths +// i.e. ReportValidationErrors("ContactInfo.|cInfo", errs) where cInfo represents say the JSON name of +// the relative path; this will be split into 2 variables in the next valiator version. 
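+//
+// Illustrative sketch (the User/ContactInfo types and the UserStructLevel func are
+// hypothetical; the call pattern mirrors validator_test.go in this diff): a struct
+// level validation func can validate a nested struct itself and then re-report
+// those errors relative to the parent:
+//
+//	func UserStructLevel(v *Validate, sl *StructLevel) {
+//		u := sl.CurrentStruct.Interface().(User)
+//		if errs := v.Struct(u.ContactInfo); errs != nil {
+//			sl.ReportValidationErrors("ContactInfo.", errs.(ValidationErrors))
+//		}
+//	}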
+func (sl *StructLevel) ReportValidationErrors(relativeKey string, errs ValidationErrors) { + for _, e := range errs { + + idx := strings.Index(relativeKey, "|") + var rel string + var cRel string + + if idx != -1 { + rel = relativeKey[:idx] + cRel = relativeKey[idx+1:] + } else { + rel = relativeKey + } + + key := sl.errPrefix + rel + e.Field + + e.FieldNamespace = key + e.NameNamespace = sl.nsPrefix + cRel + e.Name + + sl.errs[key] = e + } +} + +// ReportError reports an error just by passing the field and tag information +// NOTE: tag can be an existing validation tag or just something you make up +// and precess on the flip side it's up to you. +func (sl *StructLevel) ReportError(field reflect.Value, fieldName string, customName string, tag string) { + + field, kind := sl.v.ExtractType(field) + + if fieldName == blank { + panic(fieldNameRequired) + } + + if customName == blank { + customName = fieldName + } + + if tag == blank { + panic(tagRequired) + } + + ns := sl.errPrefix + fieldName + + switch kind { + case reflect.Invalid: + sl.errs[ns] = &FieldError{ + FieldNamespace: ns, + NameNamespace: sl.nsPrefix + customName, + Name: customName, + Field: fieldName, + Tag: tag, + ActualTag: tag, + Param: blank, + Kind: kind, + } + default: + sl.errs[ns] = &FieldError{ + FieldNamespace: ns, + NameNamespace: sl.nsPrefix + customName, + Name: customName, + Field: fieldName, + Tag: tag, + ActualTag: tag, + Param: blank, + Value: field.Interface(), + Kind: kind, + Type: field.Type(), + } + } +} + +// Validate contains the validator settings passed in using the Config struct +type Validate struct { + tagName string + fieldNameTag string + validationFuncs map[string]Func + structLevelFuncs map[reflect.Type]StructLevelFunc + customTypeFuncs map[reflect.Type]CustomTypeFunc + aliasValidators map[string]string + hasCustomFuncs bool + hasAliasValidators bool + hasStructLevelFuncs bool + tagCache *tagCache + structCache *structCache + errsPool *sync.Pool +} + +func (v *Validate) initCheck() { + if v == nil { + panic(validatorNotInitialized) + } +} + +// Config contains the options that a Validator instance will use. +// It is passed to the New() function +type Config struct { + TagName string + FieldNameTag string +} + +// CustomTypeFunc allows for overriding or adding custom field type handler functions +// field = field value of the type to return a value to be validated +// example Valuer from sql drive see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29 +type CustomTypeFunc func(field reflect.Value) interface{} + +// Func accepts all values needed for file and cross field validation +// v = validator instance, needed but some built in functions for it's custom types +// topStruct = top level struct when validating by struct otherwise nil +// currentStruct = current level struct when validating by struct otherwise optional comparison value +// field = field value for validation +// param = parameter used in validation i.e. 
gt=0 param would be 0 +type Func func(v *Validate, topStruct reflect.Value, currentStruct reflect.Value, field reflect.Value, fieldtype reflect.Type, fieldKind reflect.Kind, param string) bool + +// StructLevelFunc accepts all values needed for struct level validation +type StructLevelFunc func(v *Validate, structLevel *StructLevel) + +// ValidationErrors is a type of map[string]*FieldError +// it exists to allow for multiple errors to be passed from this library +// and yet still subscribe to the error interface +type ValidationErrors map[string]*FieldError + +// Error is intended for use in development + debugging and not intended to be a production error message. +// It allows ValidationErrors to subscribe to the Error interface. +// All information to create an error message specific to your application is contained within +// the FieldError found within the ValidationErrors map +func (ve ValidationErrors) Error() string { + + buff := bytes.NewBufferString(blank) + + for key, err := range ve { + buff.WriteString(fmt.Sprintf(fieldErrMsg, key, err.Field, err.Tag)) + buff.WriteString("\n") + } + + return strings.TrimSpace(buff.String()) +} + +// FieldError contains a single field's validation error along +// with other properties that may be needed for error message creation +type FieldError struct { + FieldNamespace string + NameNamespace string + Field string + Name string + Tag string + ActualTag string + Kind reflect.Kind + Type reflect.Type + Param string + Value interface{} +} + +// New creates a new Validate instance for use. +func New(config *Config) *Validate { + + tc := new(tagCache) + tc.m.Store(make(map[string]*cTag)) + + sc := new(structCache) + sc.m.Store(make(map[reflect.Type]*cStruct)) + + v := &Validate{ + tagName: config.TagName, + fieldNameTag: config.FieldNameTag, + tagCache: tc, + structCache: sc, + errsPool: &sync.Pool{New: func() interface{} { + return ValidationErrors{} + }}} + + if len(v.aliasValidators) == 0 { + // must copy alias validators for separate validations to be used in each validator instance + v.aliasValidators = map[string]string{} + for k, val := range bakedInAliasValidators { + v.RegisterAliasValidation(k, val) + } + } + + if len(v.validationFuncs) == 0 { + // must copy validators for separate validations to be used in each instance + v.validationFuncs = map[string]Func{} + for k, val := range bakedInValidators { + v.RegisterValidation(k, val) + } + } + + return v +} + +// RegisterStructValidation registers a StructLevelFunc against a number of types +// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) { + v.initCheck() + + if v.structLevelFuncs == nil { + v.structLevelFuncs = map[reflect.Type]StructLevelFunc{} + } + + for _, t := range types { + v.structLevelFuncs[reflect.TypeOf(t)] = fn + } + + v.hasStructLevelFuncs = true +} + +// RegisterValidation adds a validation Func to a Validate's map of validators denoted by the key +// NOTE: if the key already exists, the previous validation function will be replaced. 
+// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterValidation(key string, fn Func) error { + v.initCheck() + + if key == blank { + return errors.New("Function Key cannot be empty") + } + + if fn == nil { + return errors.New("Function cannot be empty") + } + + _, ok := restrictedTags[key] + + if ok || strings.ContainsAny(key, restrictedTagChars) { + panic(fmt.Sprintf(restrictedTagErr, key)) + } + + v.validationFuncs[key] = fn + + return nil +} + +// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types +// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) { + v.initCheck() + + if v.customTypeFuncs == nil { + v.customTypeFuncs = map[reflect.Type]CustomTypeFunc{} + } + + for _, t := range types { + v.customTypeFuncs[reflect.TypeOf(t)] = fn + } + + v.hasCustomFuncs = true +} + +// RegisterAliasValidation registers a mapping of a single validationstag that +// defines a common or complex set of validation(s) to simplify adding validation +// to structs. NOTE: when returning an error the tag returned in FieldError will be +// the alias tag unless the dive tag is part of the alias; everything after the +// dive tag is not reported as the alias tag. Also the ActualTag in the before case +// will be the actual tag within the alias that failed. +// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterAliasValidation(alias, tags string) { + v.initCheck() + + _, ok := restrictedTags[alias] + + if ok || strings.ContainsAny(alias, restrictedTagChars) { + panic(fmt.Sprintf(restrictedAliasErr, alias)) + } + + v.aliasValidators[alias] = tags + v.hasAliasValidators = true +} + +// Field validates a single field using tag style validation and returns nil or ValidationErrors as type error. +// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors. +// NOTE: it returns ValidationErrors instead of a single FieldError because this can also +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) Field(field interface{}, tag string) error { + v.initCheck() + + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + + errs := v.errsPool.Get().(ValidationErrors) + fieldVal := reflect.ValueOf(field) + + ctag, ok := v.tagCache.Get(tag) + if !ok { + v.tagCache.lock.Lock() + defer v.tagCache.lock.Unlock() + + // could have been multiple trying to access, but once first is done this ensures tag + // isn't parsed again. + ctag, ok = v.tagCache.Get(tag) + if !ok { + ctag, _ = v.parseFieldTagsRecursive(tag, blank, blank, false) + v.tagCache.Set(tag, ctag) + } + } + + v.traverseField(fieldVal, fieldVal, fieldVal, blank, blank, errs, false, false, nil, nil, defaultCField, ctag) + + if len(errs) == 0 { + v.errsPool.Put(errs) + return nil + } + + return errs +} + +// FieldWithValue validates a single field, against another fields value using tag style validation and returns nil or ValidationErrors. +// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors. 
+// NOTE: it returns ValidationErrors instead of a single FieldError because this can also +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) FieldWithValue(val interface{}, field interface{}, tag string) error { + v.initCheck() + + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + + errs := v.errsPool.Get().(ValidationErrors) + topVal := reflect.ValueOf(val) + + ctag, ok := v.tagCache.Get(tag) + if !ok { + v.tagCache.lock.Lock() + defer v.tagCache.lock.Unlock() + + // could have been multiple trying to access, but once first is done this ensures tag + // isn't parsed again. + ctag, ok = v.tagCache.Get(tag) + if !ok { + ctag, _ = v.parseFieldTagsRecursive(tag, blank, blank, false) + v.tagCache.Set(tag, ctag) + } + } + + v.traverseField(topVal, topVal, reflect.ValueOf(field), blank, blank, errs, false, false, nil, nil, defaultCField, ctag) + + if len(errs) == 0 { + v.errsPool.Put(errs) + return nil + } + + return errs +} + +// StructPartial validates the fields passed in only, ignoring all others. +// Fields may be provided in a namespaced fashion relative to the struct provided +// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name and returns nil or ValidationErrors as error +// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors. +func (v *Validate) StructPartial(current interface{}, fields ...string) error { + v.initCheck() + + sv, _ := v.ExtractType(reflect.ValueOf(current)) + name := sv.Type().Name() + m := map[string]struct{}{} + + if fields != nil { + for _, k := range fields { + + flds := strings.Split(k, namespaceSeparator) + if len(flds) > 0 { + + key := name + namespaceSeparator + for _, s := range flds { + + idx := strings.Index(s, leftBracket) + + if idx != -1 { + for idx != -1 { + key += s[:idx] + m[key] = struct{}{} + + idx2 := strings.Index(s, rightBracket) + idx2++ + key += s[idx:idx2] + m[key] = struct{}{} + s = s[idx2:] + idx = strings.Index(s, leftBracket) + } + } else { + + key += s + m[key] = struct{}{} + } + + key += namespaceSeparator + } + } + } + } + + errs := v.errsPool.Get().(ValidationErrors) + + v.ensureValidStruct(sv, sv, sv, blank, blank, errs, true, len(m) != 0, false, m, false) + + if len(errs) == 0 { + v.errsPool.Put(errs) + return nil + } + + return errs +} + +// StructExcept validates all fields except the ones passed in. +// Fields may be provided in a namespaced fashion relative to the struct provided +// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name and returns nil or ValidationErrors as error +// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors. +func (v *Validate) StructExcept(current interface{}, fields ...string) error { + v.initCheck() + + sv, _ := v.ExtractType(reflect.ValueOf(current)) + name := sv.Type().Name() + m := map[string]struct{}{} + + for _, key := range fields { + m[name+namespaceSeparator+key] = struct{}{} + } + + errs := v.errsPool.Get().(ValidationErrors) + + v.ensureValidStruct(sv, sv, sv, blank, blank, errs, true, len(m) != 0, true, m, false) + + if len(errs) == 0 { + v.errsPool.Put(errs) + return nil + } + + return errs +} + +// Struct validates a structs exposed fields, and automatically validates nested structs, unless otherwise specified. +// it returns nil or ValidationErrors as error. +// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors. 
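+//
+// Minimal usage sketch (the User type is hypothetical; the call pattern follows
+// the tests in this package, e.g. New(&Config{TagName: "validate"})):
+//
+//	type User struct {
+//		Email string `validate:"required,email"`
+//	}
+//
+//	v := New(&Config{TagName: "validate"})
+//	if err := v.Struct(User{}); err != nil {
+//		for _, fe := range err.(ValidationErrors) {
+//			fmt.Println(fe.FieldNamespace, fe.Tag) // e.g. "User.Email required"
+//		}
+//	}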
+func (v *Validate) Struct(current interface{}) error { + v.initCheck() + + errs := v.errsPool.Get().(ValidationErrors) + sv := reflect.ValueOf(current) + + v.ensureValidStruct(sv, sv, sv, blank, blank, errs, true, false, false, nil, false) + + if len(errs) == 0 { + v.errsPool.Put(errs) + return nil + } + + return errs +} + +func (v *Validate) ensureValidStruct(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, useStructName bool, partial bool, exclude bool, includeExclude map[string]struct{}, isStructOnly bool) { + + if current.Kind() == reflect.Ptr && !current.IsNil() { + current = current.Elem() + } + + if current.Kind() != reflect.Struct && current.Kind() != reflect.Interface { + panic("value passed for validation is not a struct") + } + + v.tranverseStruct(topStruct, currentStruct, current, errPrefix, nsPrefix, errs, useStructName, partial, exclude, includeExclude, nil, nil) +} + +// tranverseStruct traverses a structs fields and then passes them to be validated by traverseField +func (v *Validate) tranverseStruct(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, useStructName bool, partial bool, exclude bool, includeExclude map[string]struct{}, cs *cStruct, ct *cTag) { + + var ok bool + first := len(nsPrefix) == 0 + typ := current.Type() + + cs, ok = v.structCache.Get(typ) + if !ok { + cs = v.extractStructCache(current, typ.Name()) + } + + if useStructName { + errPrefix += cs.Name + namespaceSeparator + + if len(v.fieldNameTag) != 0 { + nsPrefix += cs.Name + namespaceSeparator + } + } + + // structonly tag present don't tranverseFields + // but must still check and run below struct level validation + // if present + if first || ct == nil || ct.typeof != typeStructOnly { + + for _, f := range cs.fields { + + if partial { + + _, ok = includeExclude[errPrefix+f.Name] + + if (ok && exclude) || (!ok && !exclude) { + continue + } + } + + v.traverseField(topStruct, currentStruct, current.Field(f.Idx), errPrefix, nsPrefix, errs, partial, exclude, includeExclude, cs, f, f.cTags) + } + } + + // check if any struct level validations, after all field validations already checked. 
+ if cs.fn != nil { + cs.fn(v, &StructLevel{v: v, TopStruct: topStruct, CurrentStruct: current, errPrefix: errPrefix, nsPrefix: nsPrefix, errs: errs}) + } +} + +// traverseField validates any field, be it a struct or single field, ensures it's validity and passes it along to be validated via it's tag options +func (v *Validate) traverseField(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, partial bool, exclude bool, includeExclude map[string]struct{}, cs *cStruct, cf *cField, ct *cTag) { + + current, kind, nullable := v.extractTypeInternal(current, false) + var typ reflect.Type + + switch kind { + case reflect.Ptr, reflect.Interface, reflect.Invalid: + + if ct == nil { + return + } + + if ct.typeof == typeOmitEmpty { + return + } + + if ct.hasTag { + + ns := errPrefix + cf.Name + + if kind == reflect.Invalid { + errs[ns] = &FieldError{ + FieldNamespace: ns, + NameNamespace: nsPrefix + cf.AltName, + Name: cf.AltName, + Field: cf.Name, + Tag: ct.aliasTag, + ActualTag: ct.tag, + Param: ct.param, + Kind: kind, + } + return + } + + errs[ns] = &FieldError{ + FieldNamespace: ns, + NameNamespace: nsPrefix + cf.AltName, + Name: cf.AltName, + Field: cf.Name, + Tag: ct.aliasTag, + ActualTag: ct.tag, + Param: ct.param, + Value: current.Interface(), + Kind: kind, + Type: current.Type(), + } + + return + } + + case reflect.Struct: + typ = current.Type() + + if typ != timeType { + + if ct != nil { + ct = ct.next + } + + if ct != nil && ct.typeof == typeNoStructLevel { + return + } + + v.tranverseStruct(topStruct, current, current, errPrefix+cf.Name+namespaceSeparator, nsPrefix+cf.AltName+namespaceSeparator, errs, false, partial, exclude, includeExclude, cs, ct) + return + } + } + + if !ct.hasTag { + return + } + + typ = current.Type() + +OUTER: + for { + if ct == nil { + return + } + + switch ct.typeof { + + case typeExists: + ct = ct.next + continue + + case typeOmitEmpty: + + if !nullable && !HasValue(v, topStruct, currentStruct, current, typ, kind, blank) { + return + } + + ct = ct.next + continue + + case typeDive: + + ct = ct.next + + // traverse slice or map here + // or panic ;) + switch kind { + case reflect.Slice, reflect.Array: + + for i := 0; i < current.Len(); i++ { + v.traverseField(topStruct, currentStruct, current.Index(i), errPrefix, nsPrefix, errs, partial, exclude, includeExclude, cs, &cField{Name: fmt.Sprintf(arrayIndexFieldName, cf.Name, i), AltName: fmt.Sprintf(arrayIndexFieldName, cf.AltName, i)}, ct) + } + + case reflect.Map: + for _, key := range current.MapKeys() { + v.traverseField(topStruct, currentStruct, current.MapIndex(key), errPrefix, nsPrefix, errs, partial, exclude, includeExclude, cs, &cField{Name: fmt.Sprintf(mapIndexFieldName, cf.Name, key.Interface()), AltName: fmt.Sprintf(mapIndexFieldName, cf.AltName, key.Interface())}, ct) + } + + default: + // throw error, if not a slice or map then should not have gotten here + // bad dive tag + panic("dive error! 
can't dive on a non slice or map") + } + + return + + case typeOr: + + errTag := blank + + for { + + if ct.fn(v, topStruct, currentStruct, current, typ, kind, ct.param) { + + // drain rest of the 'or' values, then continue or leave + for { + + ct = ct.next + + if ct == nil { + return + } + + if ct.typeof != typeOr { + continue OUTER + } + } + } + + errTag += orSeparator + ct.tag + + if ct.next == nil { + // if we get here, no valid 'or' value and no more tags + + ns := errPrefix + cf.Name + + if ct.hasAlias { + errs[ns] = &FieldError{ + FieldNamespace: ns, + NameNamespace: nsPrefix + cf.AltName, + Name: cf.AltName, + Field: cf.Name, + Tag: ct.aliasTag, + ActualTag: ct.actualAliasTag, + Value: current.Interface(), + Type: typ, + Kind: kind, + } + } else { + errs[errPrefix+cf.Name] = &FieldError{ + FieldNamespace: ns, + NameNamespace: nsPrefix + cf.AltName, + Name: cf.AltName, + Field: cf.Name, + Tag: errTag[1:], + ActualTag: errTag[1:], + Value: current.Interface(), + Type: typ, + Kind: kind, + } + } + + return + } + + ct = ct.next + } + + default: + if !ct.fn(v, topStruct, currentStruct, current, typ, kind, ct.param) { + + ns := errPrefix + cf.Name + + errs[ns] = &FieldError{ + FieldNamespace: ns, + NameNamespace: nsPrefix + cf.AltName, + Name: cf.AltName, + Field: cf.Name, + Tag: ct.aliasTag, + ActualTag: ct.tag, + Value: current.Interface(), + Param: ct.param, + Type: typ, + Kind: kind, + } + + return + + } + + ct = ct.next + } + } +} diff --git a/vendor/gopkg.in/go-playground/validator.v8/validator_test.go b/vendor/gopkg.in/go-playground/validator.v8/validator_test.go new file mode 100644 index 000000000..3629acc2b --- /dev/null +++ b/vendor/gopkg.in/go-playground/validator.v8/validator_test.go @@ -0,0 +1,5900 @@ +package validator + +import ( + "database/sql" + "database/sql/driver" + "encoding/json" + "fmt" + "reflect" + "testing" + "time" + + . "gopkg.in/go-playground/assert.v1" +) + +// NOTES: +// - Run "go test" to run tests +// - Run "gocov test | gocov report" to report on test converage by file +// - Run "gocov test | gocov annotate -" to report on all code and functions, those ,marked with "MISS" were never called +// +// or +// +// -- may be a good idea to change to output path to somewherelike /tmp +// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html +// +// +// go test -cpuprofile cpu.out +// ./validator.test -test.bench=. 
-test.cpuprofile=cpu.prof +// go tool pprof validator.test cpu.prof +// +// +// go test -memprofile mem.out + +type I interface { + Foo() string +} + +type Impl struct { + F string `validate:"len=3"` +} + +func (i *Impl) Foo() string { + return i.F +} + +type SubTest struct { + Test string `validate:"required"` +} + +type TestInterface struct { + Iface I +} + +type TestString struct { + BlankTag string `validate:""` + Required string `validate:"required"` + Len string `validate:"len=10"` + Min string `validate:"min=1"` + Max string `validate:"max=10"` + MinMax string `validate:"min=1,max=10"` + Lt string `validate:"lt=10"` + Lte string `validate:"lte=10"` + Gt string `validate:"gt=10"` + Gte string `validate:"gte=10"` + OmitEmpty string `validate:"omitempty,min=1,max=10"` + Sub *SubTest + SubIgnore *SubTest `validate:"-"` + Anonymous struct { + A string `validate:"required"` + } + Iface I +} + +type TestInt32 struct { + Required int `validate:"required"` + Len int `validate:"len=10"` + Min int `validate:"min=1"` + Max int `validate:"max=10"` + MinMax int `validate:"min=1,max=10"` + Lt int `validate:"lt=10"` + Lte int `validate:"lte=10"` + Gt int `validate:"gt=10"` + Gte int `validate:"gte=10"` + OmitEmpty int `validate:"omitempty,min=1,max=10"` +} + +type TestUint64 struct { + Required uint64 `validate:"required"` + Len uint64 `validate:"len=10"` + Min uint64 `validate:"min=1"` + Max uint64 `validate:"max=10"` + MinMax uint64 `validate:"min=1,max=10"` + OmitEmpty uint64 `validate:"omitempty,min=1,max=10"` +} + +type TestFloat64 struct { + Required float64 `validate:"required"` + Len float64 `validate:"len=10"` + Min float64 `validate:"min=1"` + Max float64 `validate:"max=10"` + MinMax float64 `validate:"min=1,max=10"` + Lte float64 `validate:"lte=10"` + OmitEmpty float64 `validate:"omitempty,min=1,max=10"` +} + +type TestSlice struct { + Required []int `validate:"required"` + Len []int `validate:"len=10"` + Min []int `validate:"min=1"` + Max []int `validate:"max=10"` + MinMax []int `validate:"min=1,max=10"` + OmitEmpty []int `validate:"omitempty,min=1,max=10"` +} + +var validate = New(&Config{TagName: "validate"}) + +func AssertError(t *testing.T, err error, key, field, expectedTag string) { + + errs := err.(ValidationErrors) + + val, ok := errs[key] + EqualSkip(t, 2, ok, true) + NotEqualSkip(t, 2, val, nil) + EqualSkip(t, 2, val.Field, field) + EqualSkip(t, 2, val.Tag, expectedTag) +} + +type valuer struct { + Name string +} + +func (v valuer) Value() (driver.Value, error) { + + if v.Name == "errorme" { + panic("SQL Driver Valuer error: some kind of error") + // return nil, errors.New("some kind of error") + } + + if len(v.Name) == 0 { + return nil, nil + } + + return v.Name, nil +} + +type MadeUpCustomType struct { + FirstName string + LastName string +} + +func ValidateCustomType(field reflect.Value) interface{} { + if cust, ok := field.Interface().(MadeUpCustomType); ok { + + if len(cust.FirstName) == 0 || len(cust.LastName) == 0 { + return "" + } + + return cust.FirstName + " " + cust.LastName + } + + return "" +} + +func OverrideIntTypeForSomeReason(field reflect.Value) interface{} { + + if i, ok := field.Interface().(int); ok { + if i == 1 { + return "1" + } + + if i == 2 { + return "12" + } + } + + return "" +} + +type CustomMadeUpStruct struct { + MadeUp MadeUpCustomType `validate:"required"` + OverriddenInt int `validate:"gt=1"` +} + +func ValidateValuerType(field reflect.Value) interface{} { + + if valuer, ok := field.Interface().(driver.Valuer); ok { + + val, err := 
valuer.Value() + if err != nil { + // handle the error how you want + return nil + } + + return val + } + + return nil +} + +type TestPartial struct { + NoTag string + BlankTag string `validate:""` + Required string `validate:"required"` + SubSlice []*SubTest `validate:"required,dive"` + Sub *SubTest + SubIgnore *SubTest `validate:"-"` + Anonymous struct { + A string `validate:"required"` + ASubSlice []*SubTest `validate:"required,dive"` + + SubAnonStruct []struct { + Test string `validate:"required"` + OtherTest string `validate:"required"` + } `validate:"required,dive"` + } +} + +type TestStruct struct { + String string `validate:"required" json:"StringVal"` +} + +func StructValidationTestStructSuccess(v *Validate, structLevel *StructLevel) { + + st := structLevel.CurrentStruct.Interface().(TestStruct) + + if st.String != "good value" { + structLevel.ReportError(reflect.ValueOf(st.String), "String", "StringVal", "badvalueteststruct") + } +} + +func StructValidationTestStruct(v *Validate, structLevel *StructLevel) { + + st := structLevel.CurrentStruct.Interface().(TestStruct) + + if st.String != "bad value" { + structLevel.ReportError(reflect.ValueOf(st.String), "String", "StringVal", "badvalueteststruct") + } +} + +func StructValidationBadTestStructFieldName(v *Validate, structLevel *StructLevel) { + + st := structLevel.CurrentStruct.Interface().(TestStruct) + + if st.String != "bad value" { + structLevel.ReportError(reflect.ValueOf(st.String), "", "StringVal", "badvalueteststruct") + } +} + +func StructValidationBadTestStructTag(v *Validate, structLevel *StructLevel) { + + st := structLevel.CurrentStruct.Interface().(TestStruct) + + if st.String != "bad value" { + structLevel.ReportError(reflect.ValueOf(st.String), "String", "StringVal", "") + } +} + +func StructValidationNoTestStructCustomName(v *Validate, structLevel *StructLevel) { + + st := structLevel.CurrentStruct.Interface().(TestStruct) + + if st.String != "bad value" { + structLevel.ReportError(reflect.ValueOf(st.String), "String", "", "badvalueteststruct") + } +} + +func StructValidationTestStructInvalid(v *Validate, structLevel *StructLevel) { + + st := structLevel.CurrentStruct.Interface().(TestStruct) + + if st.String != "bad value" { + structLevel.ReportError(reflect.ValueOf(nil), "String", "StringVal", "badvalueteststruct") + } +} + +func StructValidationTestStructReturnValidationErrors(v *Validate, structLevel *StructLevel) { + + s := structLevel.CurrentStruct.Interface().(TestStructReturnValidationErrors) + + errs := v.Struct(s.Inner1.Inner2) + if errs == nil { + return + } + + structLevel.ReportValidationErrors("Inner1.", errs.(ValidationErrors)) +} + +func StructValidationTestStructReturnValidationErrors2(v *Validate, structLevel *StructLevel) { + + s := structLevel.CurrentStruct.Interface().(TestStructReturnValidationErrors) + + errs := v.Struct(s.Inner1.Inner2) + if errs == nil { + return + } + + structLevel.ReportValidationErrors("Inner1.|Inner1JSON.", errs.(ValidationErrors)) +} + +type TestStructReturnValidationErrorsInner2 struct { + String string `validate:"required" json:"JSONString"` +} + +type TestStructReturnValidationErrorsInner1 struct { + Inner2 *TestStructReturnValidationErrorsInner2 +} + +type TestStructReturnValidationErrors struct { + Inner1 *TestStructReturnValidationErrorsInner1 `json:"Inner1JSON"` +} + +type Inner2Namespace struct { + String []string `validate:"dive,required" json:"JSONString"` +} + +type Inner1Namespace struct { + Inner2 *Inner2Namespace `json:"Inner2JSON"` +} + +type Namespace 
struct { + Inner1 *Inner1Namespace `json:"Inner1JSON"` +} + +func TestNameNamespace(t *testing.T) { + + config := &Config{ + TagName: "validate", + FieldNameTag: "json", + } + + v1 := New(config) + i2 := &Inner2Namespace{String: []string{"ok", "ok", "ok"}} + i1 := &Inner1Namespace{Inner2: i2} + ns := &Namespace{Inner1: i1} + + errs := v1.Struct(ns) + Equal(t, errs, nil) + + i2.String[1] = "" + + errs = v1.Struct(ns) + NotEqual(t, errs, nil) + + ve := errs.(ValidationErrors) + Equal(t, len(ve), 1) + AssertError(t, errs, "Namespace.Inner1.Inner2.String[1]", "String[1]", "required") + + fe, ok := ve["Namespace.Inner1.Inner2.String[1]"] + Equal(t, ok, true) + + Equal(t, fe.Field, "String[1]") + Equal(t, fe.FieldNamespace, "Namespace.Inner1.Inner2.String[1]") + Equal(t, fe.Name, "JSONString[1]") + Equal(t, fe.NameNamespace, "Namespace.Inner1JSON.Inner2JSON.JSONString[1]") +} + +func TestAnonymous(t *testing.T) { + + v2 := New(&Config{TagName: "validate", FieldNameTag: "json"}) + + type Test struct { + Anonymous struct { + A string `validate:"required" json:"EH"` + } + AnonymousB struct { + B string `validate:"required" json:"BEE"` + } + anonymousC struct { + c string `validate:"required"` + } + } + + tst := &Test{ + Anonymous: struct { + A string `validate:"required" json:"EH"` + }{ + A: "1", + }, + AnonymousB: struct { + B string `validate:"required" json:"BEE"` + }{ + B: "", + }, + anonymousC: struct { + c string `validate:"required"` + }{ + c: "", + }, + } + + err := v2.Struct(tst) + NotEqual(t, err, nil) + + errs := err.(ValidationErrors) + + Equal(t, len(errs), 1) + AssertError(t, errs, "Test.AnonymousB.B", "B", "required") + Equal(t, errs["Test.AnonymousB.B"].Field, "B") + Equal(t, errs["Test.AnonymousB.B"].Name, "BEE") + + s := struct { + c string `validate:"required"` + }{ + c: "", + } + + err = v2.Struct(s) + Equal(t, err, nil) +} + +func TestAnonymousSameStructDifferentTags(t *testing.T) { + + v2 := New(&Config{TagName: "validate", FieldNameTag: "json"}) + + type Test struct { + A interface{} + } + + tst := &Test{ + A: struct { + A string `validate:"required"` + }{ + A: "", + }, + } + + err := v2.Struct(tst) + NotEqual(t, err, nil) + + errs := err.(ValidationErrors) + + Equal(t, len(errs), 1) + AssertError(t, errs, "Test.A.A", "A", "required") + + tst = &Test{ + A: struct { + A string `validate:"omitempty,required"` + }{ + A: "", + }, + } + + err = v2.Struct(tst) + Equal(t, err, nil) +} + +func TestStructLevelReturnValidationErrors(t *testing.T) { + config := &Config{ + TagName: "validate", + } + + v1 := New(config) + v1.RegisterStructValidation(StructValidationTestStructReturnValidationErrors, TestStructReturnValidationErrors{}) + + inner2 := &TestStructReturnValidationErrorsInner2{ + String: "I'm HERE", + } + + inner1 := &TestStructReturnValidationErrorsInner1{ + Inner2: inner2, + } + + val := &TestStructReturnValidationErrors{ + Inner1: inner1, + } + + errs := v1.Struct(val) + Equal(t, errs, nil) + + inner2.String = "" + + errs = v1.Struct(val) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 2) + AssertError(t, errs, "TestStructReturnValidationErrors.Inner1.Inner2.String", "String", "required") + // this is an extra error reported from struct validation + AssertError(t, errs, "TestStructReturnValidationErrors.Inner1.String", "String", "required") +} + +func TestStructLevelReturnValidationErrorsWithJSON(t *testing.T) { + config := &Config{ + TagName: "validate", + FieldNameTag: "json", + } + + v1 := New(config) + 
v1.RegisterStructValidation(StructValidationTestStructReturnValidationErrors2, TestStructReturnValidationErrors{}) + + inner2 := &TestStructReturnValidationErrorsInner2{ + String: "I'm HERE", + } + + inner1 := &TestStructReturnValidationErrorsInner1{ + Inner2: inner2, + } + + val := &TestStructReturnValidationErrors{ + Inner1: inner1, + } + + errs := v1.Struct(val) + Equal(t, errs, nil) + + inner2.String = "" + + errs = v1.Struct(val) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 2) + AssertError(t, errs, "TestStructReturnValidationErrors.Inner1.Inner2.String", "String", "required") + // this is an extra error reported from struct validation, it's a badly formatted one, but on purpose + AssertError(t, errs, "TestStructReturnValidationErrors.Inner1.String", "String", "required") + + fe, ok := errs.(ValidationErrors)["TestStructReturnValidationErrors.Inner1.Inner2.String"] + Equal(t, ok, true) + + // check for proper JSON namespace + Equal(t, fe.Field, "String") + Equal(t, fe.Name, "JSONString") + Equal(t, fe.FieldNamespace, "TestStructReturnValidationErrors.Inner1.Inner2.String") + Equal(t, fe.NameNamespace, "TestStructReturnValidationErrors.Inner1JSON.Inner2.JSONString") + + fe, ok = errs.(ValidationErrors)["TestStructReturnValidationErrors.Inner1.String"] + Equal(t, ok, true) + + // check for proper JSON namespace + Equal(t, fe.Field, "String") + Equal(t, fe.Name, "JSONString") + Equal(t, fe.FieldNamespace, "TestStructReturnValidationErrors.Inner1.String") + Equal(t, fe.NameNamespace, "TestStructReturnValidationErrors.Inner1JSON.JSONString") +} + +func TestStructLevelValidations(t *testing.T) { + + config := &Config{ + TagName: "validate", + } + + v1 := New(config) + v1.RegisterStructValidation(StructValidationTestStruct, TestStruct{}) + + tst := &TestStruct{ + String: "good value", + } + + errs := v1.Struct(tst) + NotEqual(t, errs, nil) + AssertError(t, errs, "TestStruct.String", "String", "badvalueteststruct") + + v2 := New(config) + v2.RegisterStructValidation(StructValidationBadTestStructFieldName, TestStruct{}) + + PanicMatches(t, func() { v2.Struct(tst) }, fieldNameRequired) + + v3 := New(config) + v3.RegisterStructValidation(StructValidationBadTestStructTag, TestStruct{}) + + PanicMatches(t, func() { v3.Struct(tst) }, tagRequired) + + v4 := New(config) + v4.RegisterStructValidation(StructValidationNoTestStructCustomName, TestStruct{}) + + errs = v4.Struct(tst) + NotEqual(t, errs, nil) + AssertError(t, errs, "TestStruct.String", "String", "badvalueteststruct") + + v5 := New(config) + v5.RegisterStructValidation(StructValidationTestStructInvalid, TestStruct{}) + + errs = v5.Struct(tst) + NotEqual(t, errs, nil) + AssertError(t, errs, "TestStruct.String", "String", "badvalueteststruct") + + v6 := New(config) + v6.RegisterStructValidation(StructValidationTestStructSuccess, TestStruct{}) + + errs = v6.Struct(tst) + Equal(t, errs, nil) +} + +func TestAliasTags(t *testing.T) { + + validate.RegisterAliasValidation("iscolor", "hexcolor|rgb|rgba|hsl|hsla") + + s := "rgb(255,255,255)" + errs := validate.Field(s, "iscolor") + Equal(t, errs, nil) + + s = "" + errs = validate.Field(s, "omitempty,iscolor") + Equal(t, errs, nil) + + s = "rgb(255,255,0)" + errs = validate.Field(s, "iscolor,len=5") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "len") + + type Test struct { + Color string `validate:"iscolor"` + } + + tst := &Test{ + Color: "#000", + } + + errs = validate.Struct(tst) + Equal(t, errs, nil) + + tst.Color = "cfvre" + errs = validate.Struct(tst) + NotEqual(t, 
errs, nil) + AssertError(t, errs, "Test.Color", "Color", "iscolor") + Equal(t, errs.(ValidationErrors)["Test.Color"].ActualTag, "hexcolor|rgb|rgba|hsl|hsla") + + validate.RegisterAliasValidation("req", "required,dive,iscolor") + arr := []string{"val1", "#fff", "#000"} + + errs = validate.Field(arr, "req") + NotEqual(t, errs, nil) + AssertError(t, errs, "[0]", "[0]", "iscolor") + + PanicMatches(t, func() { validate.RegisterAliasValidation("exists", "gt=5,lt=10") }, "Alias 'exists' either contains restricted characters or is the same as a restricted tag needed for normal operation") +} + +func TestNilValidator(t *testing.T) { + + type TestStruct struct { + Test string `validate:"required"` + } + + ts := TestStruct{} + + var val *Validate + + fn := func(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + return current.String() == field.String() + } + + PanicMatches(t, func() { val.RegisterCustomTypeFunc(ValidateCustomType, MadeUpCustomType{}) }, validatorNotInitialized) + PanicMatches(t, func() { val.RegisterValidation("something", fn) }, validatorNotInitialized) + PanicMatches(t, func() { val.Field(ts.Test, "required") }, validatorNotInitialized) + PanicMatches(t, func() { val.FieldWithValue("test", ts.Test, "required") }, validatorNotInitialized) + PanicMatches(t, func() { val.Struct(ts) }, validatorNotInitialized) + PanicMatches(t, func() { val.StructExcept(ts, "Test") }, validatorNotInitialized) + PanicMatches(t, func() { val.StructPartial(ts, "Test") }, validatorNotInitialized) +} + +func TestStructPartial(t *testing.T) { + + p1 := []string{ + "NoTag", + "Required", + } + + p2 := []string{ + "SubSlice[0].Test", + "Sub", + "SubIgnore", + "Anonymous.A", + } + + p3 := []string{ + "SubTest.Test", + } + + p4 := []string{ + "A", + } + + tPartial := &TestPartial{ + NoTag: "NoTag", + Required: "Required", + + SubSlice: []*SubTest{ + { + + Test: "Required", + }, + { + + Test: "Required", + }, + }, + + Sub: &SubTest{ + Test: "1", + }, + SubIgnore: &SubTest{ + Test: "", + }, + Anonymous: struct { + A string `validate:"required"` + ASubSlice []*SubTest `validate:"required,dive"` + SubAnonStruct []struct { + Test string `validate:"required"` + OtherTest string `validate:"required"` + } `validate:"required,dive"` + }{ + A: "1", + ASubSlice: []*SubTest{ + { + Test: "Required", + }, + { + Test: "Required", + }, + }, + + SubAnonStruct: []struct { + Test string `validate:"required"` + OtherTest string `validate:"required"` + }{ + {"Required", "RequiredOther"}, + {"Required", "RequiredOther"}, + }, + }, + } + + // the following should all return no errors as everything is valid in + // the default state + errs := validate.StructPartial(tPartial, p1...) + Equal(t, errs, nil) + + errs = validate.StructPartial(tPartial, p2...) + Equal(t, errs, nil) + + // this isn't really a robust test, but is ment to illustrate the ANON CASE below + errs = validate.StructPartial(tPartial.SubSlice[0], p3...) + Equal(t, errs, nil) + + errs = validate.StructExcept(tPartial, p1...) + Equal(t, errs, nil) + + errs = validate.StructExcept(tPartial, p2...) + Equal(t, errs, nil) + + // mod tParial for required feild and re-test making sure invalid fields are NOT required: + tPartial.Required = "" + + errs = validate.StructExcept(tPartial, p1...) + Equal(t, errs, nil) + + errs = validate.StructPartial(tPartial, p2...) 
+ Equal(t, errs, nil) + + // inversion and retesting Partial to generate failures: + errs = validate.StructPartial(tPartial, p1...) + NotEqual(t, errs, nil) + AssertError(t, errs, "TestPartial.Required", "Required", "required") + + errs = validate.StructExcept(tPartial, p2...) + AssertError(t, errs, "TestPartial.Required", "Required", "required") + + // reset Required field, and set nested struct + tPartial.Required = "Required" + tPartial.Anonymous.A = "" + + // will pass as unset feilds is not going to be tested + errs = validate.StructPartial(tPartial, p1...) + Equal(t, errs, nil) + + errs = validate.StructExcept(tPartial, p2...) + Equal(t, errs, nil) + + // ANON CASE the response here is strange, it clearly does what it is being told to + errs = validate.StructExcept(tPartial.Anonymous, p4...) + Equal(t, errs, nil) + + // will fail as unset feild is tested + errs = validate.StructPartial(tPartial, p2...) + NotEqual(t, errs, nil) + AssertError(t, errs, "TestPartial.Anonymous.A", "A", "required") + + errs = validate.StructExcept(tPartial, p1...) + NotEqual(t, errs, nil) + AssertError(t, errs, "TestPartial.Anonymous.A", "A", "required") + + // reset nested struct and unset struct in slice + tPartial.Anonymous.A = "Required" + tPartial.SubSlice[0].Test = "" + + // these will pass as unset item is NOT tested + errs = validate.StructPartial(tPartial, p1...) + Equal(t, errs, nil) + + errs = validate.StructExcept(tPartial, p2...) + Equal(t, errs, nil) + + // these will fail as unset item IS tested + errs = validate.StructExcept(tPartial, p1...) + AssertError(t, errs, "TestPartial.SubSlice[0].Test", "Test", "required") + Equal(t, len(errs.(ValidationErrors)), 1) + + errs = validate.StructPartial(tPartial, p2...) + NotEqual(t, errs, nil) + AssertError(t, errs, "TestPartial.SubSlice[0].Test", "Test", "required") + Equal(t, len(errs.(ValidationErrors)), 1) + + // Unset second slice member concurrently to test dive behavior: + tPartial.SubSlice[1].Test = "" + + errs = validate.StructPartial(tPartial, p1...) + Equal(t, errs, nil) + + // NOTE: When specifying nested items, it is still the users responsibility + // to specify the dive tag, the library does not override this. + errs = validate.StructExcept(tPartial, p2...) + NotEqual(t, errs, nil) + AssertError(t, errs, "TestPartial.SubSlice[1].Test", "Test", "required") + + errs = validate.StructExcept(tPartial, p1...) + Equal(t, len(errs.(ValidationErrors)), 2) + AssertError(t, errs, "TestPartial.SubSlice[0].Test", "Test", "required") + AssertError(t, errs, "TestPartial.SubSlice[1].Test", "Test", "required") + + errs = validate.StructPartial(tPartial, p2...) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "TestPartial.SubSlice[0].Test", "Test", "required") + + // reset struct in slice, and unset struct in slice in unset posistion + tPartial.SubSlice[0].Test = "Required" + + // these will pass as the unset item is NOT tested + errs = validate.StructPartial(tPartial, p1...) + Equal(t, errs, nil) + + errs = validate.StructPartial(tPartial, p2...) + Equal(t, errs, nil) + + // testing for missing item by exception, yes it dives and fails + errs = validate.StructExcept(tPartial, p1...) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "TestPartial.SubSlice[1].Test", "Test", "required") + + errs = validate.StructExcept(tPartial, p2...) 
+ NotEqual(t, errs, nil) + AssertError(t, errs, "TestPartial.SubSlice[1].Test", "Test", "required") + + tPartial.SubSlice[1].Test = "Required" + + tPartial.Anonymous.SubAnonStruct[0].Test = "" + // these will pass as the unset item is NOT tested + errs = validate.StructPartial(tPartial, p1...) + Equal(t, errs, nil) + + errs = validate.StructPartial(tPartial, p2...) + Equal(t, errs, nil) + + errs = validate.StructExcept(tPartial, p1...) + NotEqual(t, errs, nil) + AssertError(t, errs, "TestPartial.Anonymous.SubAnonStruct[0].Test", "Test", "required") + + errs = validate.StructExcept(tPartial, p2...) + NotEqual(t, errs, nil) + AssertError(t, errs, "TestPartial.Anonymous.SubAnonStruct[0].Test", "Test", "required") + +} + +func TestCrossStructLteFieldValidation(t *testing.T) { + + type Inner struct { + CreatedAt *time.Time + String string + Int int + Uint uint + Float float64 + Array []string + } + + type Test struct { + Inner *Inner + CreatedAt *time.Time `validate:"ltecsfield=Inner.CreatedAt"` + String string `validate:"ltecsfield=Inner.String"` + Int int `validate:"ltecsfield=Inner.Int"` + Uint uint `validate:"ltecsfield=Inner.Uint"` + Float float64 `validate:"ltecsfield=Inner.Float"` + Array []string `validate:"ltecsfield=Inner.Array"` + } + + now := time.Now().UTC() + then := now.Add(time.Hour * 5) + + inner := &Inner{ + CreatedAt: &then, + String: "abcd", + Int: 13, + Uint: 13, + Float: 1.13, + Array: []string{"val1", "val2"}, + } + + test := &Test{ + Inner: inner, + CreatedAt: &now, + String: "abc", + Int: 12, + Uint: 12, + Float: 1.12, + Array: []string{"val1"}, + } + + errs := validate.Struct(test) + Equal(t, errs, nil) + + test.CreatedAt = &then + test.String = "abcd" + test.Int = 13 + test.Uint = 13 + test.Float = 1.13 + test.Array = []string{"val1", "val2"} + + errs = validate.Struct(test) + Equal(t, errs, nil) + + after := now.Add(time.Hour * 10) + + test.CreatedAt = &after + test.String = "abce" + test.Int = 14 + test.Uint = 14 + test.Float = 1.14 + test.Array = []string{"val1", "val2", "val3"} + + errs = validate.Struct(test) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.CreatedAt", "CreatedAt", "ltecsfield") + AssertError(t, errs, "Test.String", "String", "ltecsfield") + AssertError(t, errs, "Test.Int", "Int", "ltecsfield") + AssertError(t, errs, "Test.Uint", "Uint", "ltecsfield") + AssertError(t, errs, "Test.Float", "Float", "ltecsfield") + AssertError(t, errs, "Test.Array", "Array", "ltecsfield") + + errs = validate.FieldWithValue(1, "", "ltecsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltecsfield") + + errs = validate.FieldWithValue(test, now, "ltecsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltecsfield") +} + +func TestCrossStructLtFieldValidation(t *testing.T) { + + type Inner struct { + CreatedAt *time.Time + String string + Int int + Uint uint + Float float64 + Array []string + } + + type Test struct { + Inner *Inner + CreatedAt *time.Time `validate:"ltcsfield=Inner.CreatedAt"` + String string `validate:"ltcsfield=Inner.String"` + Int int `validate:"ltcsfield=Inner.Int"` + Uint uint `validate:"ltcsfield=Inner.Uint"` + Float float64 `validate:"ltcsfield=Inner.Float"` + Array []string `validate:"ltcsfield=Inner.Array"` + } + + now := time.Now().UTC() + then := now.Add(time.Hour * 5) + + inner := &Inner{ + CreatedAt: &then, + String: "abcd", + Int: 13, + Uint: 13, + Float: 1.13, + Array: []string{"val1", "val2"}, + } + + test := &Test{ + Inner: inner, + CreatedAt: &now, + String: "abc", + Int: 12, + Uint: 12, + Float: 1.12, + 
Array: []string{"val1"}, + } + + errs := validate.Struct(test) + Equal(t, errs, nil) + + test.CreatedAt = &then + test.String = "abcd" + test.Int = 13 + test.Uint = 13 + test.Float = 1.13 + test.Array = []string{"val1", "val2"} + + errs = validate.Struct(test) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.CreatedAt", "CreatedAt", "ltcsfield") + AssertError(t, errs, "Test.String", "String", "ltcsfield") + AssertError(t, errs, "Test.Int", "Int", "ltcsfield") + AssertError(t, errs, "Test.Uint", "Uint", "ltcsfield") + AssertError(t, errs, "Test.Float", "Float", "ltcsfield") + AssertError(t, errs, "Test.Array", "Array", "ltcsfield") + + errs = validate.FieldWithValue(1, "", "ltcsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltcsfield") + + errs = validate.FieldWithValue(test, now, "ltcsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltcsfield") +} + +func TestCrossStructGteFieldValidation(t *testing.T) { + + type Inner struct { + CreatedAt *time.Time + String string + Int int + Uint uint + Float float64 + Array []string + } + + type Test struct { + Inner *Inner + CreatedAt *time.Time `validate:"gtecsfield=Inner.CreatedAt"` + String string `validate:"gtecsfield=Inner.String"` + Int int `validate:"gtecsfield=Inner.Int"` + Uint uint `validate:"gtecsfield=Inner.Uint"` + Float float64 `validate:"gtecsfield=Inner.Float"` + Array []string `validate:"gtecsfield=Inner.Array"` + } + + now := time.Now().UTC() + then := now.Add(time.Hour * -5) + + inner := &Inner{ + CreatedAt: &then, + String: "abcd", + Int: 13, + Uint: 13, + Float: 1.13, + Array: []string{"val1", "val2"}, + } + + test := &Test{ + Inner: inner, + CreatedAt: &now, + String: "abcde", + Int: 14, + Uint: 14, + Float: 1.14, + Array: []string{"val1", "val2", "val3"}, + } + + errs := validate.Struct(test) + Equal(t, errs, nil) + + test.CreatedAt = &then + test.String = "abcd" + test.Int = 13 + test.Uint = 13 + test.Float = 1.13 + test.Array = []string{"val1", "val2"} + + errs = validate.Struct(test) + Equal(t, errs, nil) + + before := now.Add(time.Hour * -10) + + test.CreatedAt = &before + test.String = "abc" + test.Int = 12 + test.Uint = 12 + test.Float = 1.12 + test.Array = []string{"val1"} + + errs = validate.Struct(test) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.CreatedAt", "CreatedAt", "gtecsfield") + AssertError(t, errs, "Test.String", "String", "gtecsfield") + AssertError(t, errs, "Test.Int", "Int", "gtecsfield") + AssertError(t, errs, "Test.Uint", "Uint", "gtecsfield") + AssertError(t, errs, "Test.Float", "Float", "gtecsfield") + AssertError(t, errs, "Test.Array", "Array", "gtecsfield") + + errs = validate.FieldWithValue(1, "", "gtecsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtecsfield") + + errs = validate.FieldWithValue(test, now, "gtecsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtecsfield") +} + +func TestCrossStructGtFieldValidation(t *testing.T) { + + type Inner struct { + CreatedAt *time.Time + String string + Int int + Uint uint + Float float64 + Array []string + } + + type Test struct { + Inner *Inner + CreatedAt *time.Time `validate:"gtcsfield=Inner.CreatedAt"` + String string `validate:"gtcsfield=Inner.String"` + Int int `validate:"gtcsfield=Inner.Int"` + Uint uint `validate:"gtcsfield=Inner.Uint"` + Float float64 `validate:"gtcsfield=Inner.Float"` + Array []string `validate:"gtcsfield=Inner.Array"` + } + + now := time.Now().UTC() + then := now.Add(time.Hour * -5) + + inner := &Inner{ + CreatedAt: &then, + String: "abcd", + Int: 
13, + Uint: 13, + Float: 1.13, + Array: []string{"val1", "val2"}, + } + + test := &Test{ + Inner: inner, + CreatedAt: &now, + String: "abcde", + Int: 14, + Uint: 14, + Float: 1.14, + Array: []string{"val1", "val2", "val3"}, + } + + errs := validate.Struct(test) + Equal(t, errs, nil) + + test.CreatedAt = &then + test.String = "abcd" + test.Int = 13 + test.Uint = 13 + test.Float = 1.13 + test.Array = []string{"val1", "val2"} + + errs = validate.Struct(test) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.CreatedAt", "CreatedAt", "gtcsfield") + AssertError(t, errs, "Test.String", "String", "gtcsfield") + AssertError(t, errs, "Test.Int", "Int", "gtcsfield") + AssertError(t, errs, "Test.Uint", "Uint", "gtcsfield") + AssertError(t, errs, "Test.Float", "Float", "gtcsfield") + AssertError(t, errs, "Test.Array", "Array", "gtcsfield") + + errs = validate.FieldWithValue(1, "", "gtcsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtcsfield") + + errs = validate.FieldWithValue(test, now, "gtcsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtcsfield") +} + +func TestCrossStructNeFieldValidation(t *testing.T) { + + type Inner struct { + CreatedAt *time.Time + } + + type Test struct { + Inner *Inner + CreatedAt *time.Time `validate:"necsfield=Inner.CreatedAt"` + } + + now := time.Now().UTC() + then := now.Add(time.Hour * 5) + + inner := &Inner{ + CreatedAt: &then, + } + + test := &Test{ + Inner: inner, + CreatedAt: &now, + } + + errs := validate.Struct(test) + Equal(t, errs, nil) + + test.CreatedAt = &then + + errs = validate.Struct(test) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.CreatedAt", "CreatedAt", "necsfield") + + var j uint64 + var k float64 + var j2 uint64 + var k2 float64 + s := "abcd" + i := 1 + j = 1 + k = 1.543 + arr := []string{"test"} + + s2 := "abcd" + i2 := 1 + j2 = 1 + k2 = 1.543 + arr2 := []string{"test"} + arr3 := []string{"test", "test2"} + now2 := now + + errs = validate.FieldWithValue(s, s2, "necsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "necsfield") + + errs = validate.FieldWithValue(i2, i, "necsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "necsfield") + + errs = validate.FieldWithValue(j2, j, "necsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "necsfield") + + errs = validate.FieldWithValue(k2, k, "necsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "necsfield") + + errs = validate.FieldWithValue(arr2, arr, "necsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "necsfield") + + errs = validate.FieldWithValue(now2, now, "necsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "necsfield") + + errs = validate.FieldWithValue(arr3, arr, "necsfield") + Equal(t, errs, nil) + + type SInner struct { + Name string + } + + type TStruct struct { + Inner *SInner + CreatedAt *time.Time `validate:"necsfield=Inner"` + } + + sinner := &SInner{ + Name: "NAME", + } + + test2 := &TStruct{ + Inner: sinner, + CreatedAt: &now, + } + + errs = validate.Struct(test2) + Equal(t, errs, nil) + + test2.Inner = nil + errs = validate.Struct(test2) + Equal(t, errs, nil) + + errs = validate.FieldWithValue(nil, 1, "necsfield") + Equal(t, errs, nil) +} + +func TestCrossStructEqFieldValidation(t *testing.T) { + + type Inner struct { + CreatedAt *time.Time + } + + type Test struct { + Inner *Inner + CreatedAt *time.Time `validate:"eqcsfield=Inner.CreatedAt"` + } + + now := time.Now().UTC() + + inner := &Inner{ + CreatedAt: &now, + } + + test := &Test{ + Inner: inner, 
+ CreatedAt: &now, + } + + errs := validate.Struct(test) + Equal(t, errs, nil) + + newTime := time.Now().UTC() + test.CreatedAt = &newTime + + errs = validate.Struct(test) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.CreatedAt", "CreatedAt", "eqcsfield") + + var j uint64 + var k float64 + s := "abcd" + i := 1 + j = 1 + k = 1.543 + arr := []string{"test"} + + var j2 uint64 + var k2 float64 + s2 := "abcd" + i2 := 1 + j2 = 1 + k2 = 1.543 + arr2 := []string{"test"} + arr3 := []string{"test", "test2"} + now2 := now + + errs = validate.FieldWithValue(s, s2, "eqcsfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(i2, i, "eqcsfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(j2, j, "eqcsfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(k2, k, "eqcsfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(arr2, arr, "eqcsfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(now2, now, "eqcsfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(arr3, arr, "eqcsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "eqcsfield") + + type SInner struct { + Name string + } + + type TStruct struct { + Inner *SInner + CreatedAt *time.Time `validate:"eqcsfield=Inner"` + } + + sinner := &SInner{ + Name: "NAME", + } + + test2 := &TStruct{ + Inner: sinner, + CreatedAt: &now, + } + + errs = validate.Struct(test2) + NotEqual(t, errs, nil) + AssertError(t, errs, "TStruct.CreatedAt", "CreatedAt", "eqcsfield") + + test2.Inner = nil + errs = validate.Struct(test2) + NotEqual(t, errs, nil) + AssertError(t, errs, "TStruct.CreatedAt", "CreatedAt", "eqcsfield") + + errs = validate.FieldWithValue(nil, 1, "eqcsfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "eqcsfield") +} + +func TestCrossNamespaceFieldValidation(t *testing.T) { + + type SliceStruct struct { + Name string + } + + type MapStruct struct { + Name string + } + + type Inner struct { + CreatedAt *time.Time + Slice []string + SliceStructs []*SliceStruct + SliceSlice [][]string + SliceSliceStruct [][]*SliceStruct + SliceMap []map[string]string + Map map[string]string + MapMap map[string]map[string]string + MapStructs map[string]*SliceStruct + MapMapStruct map[string]map[string]*SliceStruct + MapSlice map[string][]string + MapInt map[int]string + MapInt8 map[int8]string + MapInt16 map[int16]string + MapInt32 map[int32]string + MapInt64 map[int64]string + MapUint map[uint]string + MapUint8 map[uint8]string + MapUint16 map[uint16]string + MapUint32 map[uint32]string + MapUint64 map[uint64]string + MapFloat32 map[float32]string + MapFloat64 map[float64]string + MapBool map[bool]string + } + + type Test struct { + Inner *Inner + CreatedAt *time.Time + } + + now := time.Now() + + inner := &Inner{ + CreatedAt: &now, + Slice: []string{"val1", "val2", "val3"}, + SliceStructs: []*SliceStruct{{Name: "name1"}, {Name: "name2"}, {Name: "name3"}}, + SliceSlice: [][]string{{"1", "2", "3"}, {"4", "5", "6"}, {"7", "8", "9"}}, + SliceSliceStruct: [][]*SliceStruct{{{Name: "name1"}, {Name: "name2"}, {Name: "name3"}}, {{Name: "name4"}, {Name: "name5"}, {Name: "name6"}}, {{Name: "name7"}, {Name: "name8"}, {Name: "name9"}}}, + SliceMap: []map[string]string{{"key1": "val1", "key2": "val2", "key3": "val3"}, {"key4": "val4", "key5": "val5", "key6": "val6"}}, + Map: map[string]string{"key1": "val1", "key2": "val2", "key3": "val3"}, + MapStructs: map[string]*SliceStruct{"key1": {Name: "name1"}, "key2": {Name: "name2"}, "key3": {Name: "name3"}}, + MapMap: 
map[string]map[string]string{"key1": {"key1-1": "val1"}, "key2": {"key2-1": "val2"}, "key3": {"key3-1": "val3"}}, + MapMapStruct: map[string]map[string]*SliceStruct{"key1": {"key1-1": {Name: "name1"}}, "key2": {"key2-1": {Name: "name2"}}, "key3": {"key3-1": {Name: "name3"}}}, + MapSlice: map[string][]string{"key1": {"1", "2", "3"}, "key2": {"4", "5", "6"}, "key3": {"7", "8", "9"}}, + MapInt: map[int]string{1: "val1", 2: "val2", 3: "val3"}, + MapInt8: map[int8]string{1: "val1", 2: "val2", 3: "val3"}, + MapInt16: map[int16]string{1: "val1", 2: "val2", 3: "val3"}, + MapInt32: map[int32]string{1: "val1", 2: "val2", 3: "val3"}, + MapInt64: map[int64]string{1: "val1", 2: "val2", 3: "val3"}, + MapUint: map[uint]string{1: "val1", 2: "val2", 3: "val3"}, + MapUint8: map[uint8]string{1: "val1", 2: "val2", 3: "val3"}, + MapUint16: map[uint16]string{1: "val1", 2: "val2", 3: "val3"}, + MapUint32: map[uint32]string{1: "val1", 2: "val2", 3: "val3"}, + MapUint64: map[uint64]string{1: "val1", 2: "val2", 3: "val3"}, + MapFloat32: map[float32]string{1.01: "val1", 2.02: "val2", 3.03: "val3"}, + MapFloat64: map[float64]string{1.01: "val1", 2.02: "val2", 3.03: "val3"}, + MapBool: map[bool]string{true: "val1", false: "val2"}, + } + + test := &Test{ + Inner: inner, + CreatedAt: &now, + } + + val := reflect.ValueOf(test) + + current, kind, ok := validate.GetStructFieldOK(val, "Inner.CreatedAt") + Equal(t, ok, true) + Equal(t, kind, reflect.Struct) + tm, ok := current.Interface().(time.Time) + Equal(t, ok, true) + Equal(t, tm, now) + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.Slice[1]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.CrazyNonExistantField") + Equal(t, ok, false) + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.Slice[101]") + Equal(t, ok, false) + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.Map[key3]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val3") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapMap[key2][key2-1]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapStructs[key2].Name") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "name2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapMapStruct[key3][key3-1].Name") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "name3") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.SliceSlice[2][0]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "7") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.SliceSliceStruct[2][1].Name") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "name8") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.SliceMap[1][key5]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val5") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapSlice[key3][2]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "9") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapInt[2]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapInt8[2]") + Equal(t, 
ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapInt16[2]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapInt32[2]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapInt64[2]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapUint[2]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapUint8[2]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapUint16[2]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapUint32[2]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapUint64[2]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapFloat32[3.03]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val3") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapFloat64[2.02]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val2") + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.MapBool[true]") + Equal(t, ok, true) + Equal(t, kind, reflect.String) + Equal(t, current.String(), "val1") + + inner = &Inner{ + CreatedAt: &now, + Slice: []string{"val1", "val2", "val3"}, + SliceStructs: []*SliceStruct{{Name: "name1"}, {Name: "name2"}, nil}, + SliceSlice: [][]string{{"1", "2", "3"}, {"4", "5", "6"}, {"7", "8", "9"}}, + SliceSliceStruct: [][]*SliceStruct{{{Name: "name1"}, {Name: "name2"}, {Name: "name3"}}, {{Name: "name4"}, {Name: "name5"}, {Name: "name6"}}, {{Name: "name7"}, {Name: "name8"}, {Name: "name9"}}}, + SliceMap: []map[string]string{{"key1": "val1", "key2": "val2", "key3": "val3"}, {"key4": "val4", "key5": "val5", "key6": "val6"}}, + Map: map[string]string{"key1": "val1", "key2": "val2", "key3": "val3"}, + MapStructs: map[string]*SliceStruct{"key1": {Name: "name1"}, "key2": {Name: "name2"}, "key3": {Name: "name3"}}, + MapMap: map[string]map[string]string{"key1": {"key1-1": "val1"}, "key2": {"key2-1": "val2"}, "key3": {"key3-1": "val3"}}, + MapMapStruct: map[string]map[string]*SliceStruct{"key1": {"key1-1": {Name: "name1"}}, "key2": {"key2-1": {Name: "name2"}}, "key3": {"key3-1": {Name: "name3"}}}, + MapSlice: map[string][]string{"key1": {"1", "2", "3"}, "key2": {"4", "5", "6"}, "key3": {"7", "8", "9"}}, + } + + test = &Test{ + Inner: inner, + CreatedAt: nil, + } + + val = reflect.ValueOf(test) + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.SliceStructs[2]") + Equal(t, ok, true) + Equal(t, kind, reflect.Ptr) + Equal(t, current.String(), "<*validator.SliceStruct Value>") + Equal(t, current.IsNil(), true) + + current, kind, ok = validate.GetStructFieldOK(val, "Inner.SliceStructs[2].Name") + Equal(t, ok, false) + Equal(t, kind, reflect.Ptr) + Equal(t, 
current.String(), "<*validator.SliceStruct Value>") + Equal(t, current.IsNil(), true) + + PanicMatches(t, func() { validate.GetStructFieldOK(reflect.ValueOf(1), "crazyinput") }, "Invalid field namespace") +} + +func TestExistsValidation(t *testing.T) { + + jsonText := "{ \"truthiness2\": true }" + + type Thing struct { + Truthiness *bool `json:"truthiness" validate:"exists,required"` + } + + var ting Thing + + err := json.Unmarshal([]byte(jsonText), &ting) + Equal(t, err, nil) + NotEqual(t, ting, nil) + Equal(t, ting.Truthiness, nil) + + errs := validate.Struct(ting) + NotEqual(t, errs, nil) + AssertError(t, errs, "Thing.Truthiness", "Truthiness", "exists") + + jsonText = "{ \"truthiness\": true }" + + err = json.Unmarshal([]byte(jsonText), &ting) + Equal(t, err, nil) + NotEqual(t, ting, nil) + Equal(t, ting.Truthiness, true) + + errs = validate.Struct(ting) + Equal(t, errs, nil) +} + +func TestSQLValue2Validation(t *testing.T) { + + config := &Config{ + TagName: "validate", + } + + validate := New(config) + validate.RegisterCustomTypeFunc(ValidateValuerType, valuer{}, (*driver.Valuer)(nil), sql.NullString{}, sql.NullInt64{}, sql.NullBool{}, sql.NullFloat64{}) + validate.RegisterCustomTypeFunc(ValidateCustomType, MadeUpCustomType{}) + validate.RegisterCustomTypeFunc(OverrideIntTypeForSomeReason, 1) + + val := valuer{ + Name: "", + } + + errs := validate.Field(val, "required") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "required") + + val.Name = "Valid Name" + errs = validate.Field(val, "required") + Equal(t, errs, nil) + + val.Name = "errorme" + + PanicMatches(t, func() { validate.Field(val, "required") }, "SQL Driver Valuer error: some kind of error") + + type myValuer valuer + + myVal := valuer{ + Name: "", + } + + errs = validate.Field(myVal, "required") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "required") + + cust := MadeUpCustomType{ + FirstName: "Joey", + LastName: "Bloggs", + } + + c := CustomMadeUpStruct{MadeUp: cust, OverriddenInt: 2} + + errs = validate.Struct(c) + Equal(t, errs, nil) + + c.MadeUp.FirstName = "" + c.OverriddenInt = 1 + + errs = validate.Struct(c) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 2) + AssertError(t, errs, "CustomMadeUpStruct.MadeUp", "MadeUp", "required") + AssertError(t, errs, "CustomMadeUpStruct.OverriddenInt", "OverriddenInt", "gt") +} + +func TestSQLValueValidation(t *testing.T) { + + validate := New(&Config{TagName: "validate"}) + validate.RegisterCustomTypeFunc(ValidateValuerType, (*driver.Valuer)(nil), valuer{}) + validate.RegisterCustomTypeFunc(ValidateCustomType, MadeUpCustomType{}) + validate.RegisterCustomTypeFunc(OverrideIntTypeForSomeReason, 1) + + val := valuer{ + Name: "", + } + + errs := validate.Field(val, "required") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "required") + + val.Name = "Valid Name" + errs = validate.Field(val, "required") + Equal(t, errs, nil) + + val.Name = "errorme" + + PanicMatches(t, func() { errs = validate.Field(val, "required") }, "SQL Driver Valuer error: some kind of error") + + type myValuer valuer + + myVal := valuer{ + Name: "", + } + + errs = validate.Field(myVal, "required") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "required") + + cust := MadeUpCustomType{ + FirstName: "Joey", + LastName: "Bloggs", + } + + c := CustomMadeUpStruct{MadeUp: cust, OverriddenInt: 2} + + errs = validate.Struct(c) + Equal(t, errs, nil) + + c.MadeUp.FirstName = "" + c.OverriddenInt = 1 + + errs = validate.Struct(c) + NotEqual(t, errs, nil) + Equal(t, 
len(errs.(ValidationErrors)), 2) + AssertError(t, errs, "CustomMadeUpStruct.MadeUp", "MadeUp", "required") + AssertError(t, errs, "CustomMadeUpStruct.OverriddenInt", "OverriddenInt", "gt") +} + +func TestMACValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"3D:F2:C9:A6:B3:4F", true}, + {"3D-F2-C9-A6-B3:4F", false}, + {"123", false}, + {"", false}, + {"abacaba", false}, + {"00:25:96:FF:FE:12:34:56", true}, + {"0025:96FF:FE12:3456", false}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "mac") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d mac failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d mac failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "mac" { + t.Fatalf("Index: %d mac failed Error: %s", i, errs) + } + } + } + } +} + +func TestIPValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"10.0.0.1", true}, + {"172.16.0.1", true}, + {"192.168.0.1", true}, + {"192.168.255.254", true}, + {"192.168.255.256", false}, + {"172.16.255.254", true}, + {"172.16.256.255", false}, + {"2001:cdba:0000:0000:0000:0000:3257:9652", true}, + {"2001:cdba:0:0:0:0:3257:9652", true}, + {"2001:cdba::3257:9652", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "ip") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d ip failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d ip failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "ip" { + t.Fatalf("Index: %d ip failed Error: %s", i, errs) + } + } + } + } +} + +func TestIPv6Validation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"10.0.0.1", false}, + {"172.16.0.1", false}, + {"192.168.0.1", false}, + {"192.168.255.254", false}, + {"192.168.255.256", false}, + {"172.16.255.254", false}, + {"172.16.256.255", false}, + {"2001:cdba:0000:0000:0000:0000:3257:9652", true}, + {"2001:cdba:0:0:0:0:3257:9652", true}, + {"2001:cdba::3257:9652", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "ipv6") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d ipv6 failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d ipv6 failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "ipv6" { + t.Fatalf("Index: %d ipv6 failed Error: %s", i, errs) + } + } + } + } +} + +func TestIPv4Validation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"10.0.0.1", true}, + {"172.16.0.1", true}, + {"192.168.0.1", true}, + {"192.168.255.254", true}, + {"192.168.255.256", false}, + {"172.16.255.254", true}, + {"172.16.256.255", false}, + {"2001:cdba:0000:0000:0000:0000:3257:9652", false}, + {"2001:cdba:0:0:0:0:3257:9652", false}, + {"2001:cdba::3257:9652", false}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "ipv4") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d ipv4 failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d ipv4 failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "ipv4" { + t.Fatalf("Index: %d ipv4 failed Error: %s", i, errs) + } + } + } + } +} + +func TestCIDRValidation(t *testing.T) { + tests := []struct { + param string + expected 
bool + }{ + {"10.0.0.0/0", true}, + {"10.0.0.1/8", true}, + {"172.16.0.1/16", true}, + {"192.168.0.1/24", true}, + {"192.168.255.254/24", true}, + {"192.168.255.254/48", false}, + {"192.168.255.256/24", false}, + {"172.16.255.254/16", true}, + {"172.16.256.255/16", false}, + {"2001:cdba:0000:0000:0000:0000:3257:9652/64", true}, + {"2001:cdba:0000:0000:0000:0000:3257:9652/256", false}, + {"2001:cdba:0:0:0:0:3257:9652/32", true}, + {"2001:cdba::3257:9652/16", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "cidr") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d cidr failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d cidr failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "cidr" { + t.Fatalf("Index: %d cidr failed Error: %s", i, errs) + } + } + } + } +} + +func TestCIDRv6Validation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"10.0.0.0/0", false}, + {"10.0.0.1/8", false}, + {"172.16.0.1/16", false}, + {"192.168.0.1/24", false}, + {"192.168.255.254/24", false}, + {"192.168.255.254/48", false}, + {"192.168.255.256/24", false}, + {"172.16.255.254/16", false}, + {"172.16.256.255/16", false}, + {"2001:cdba:0000:0000:0000:0000:3257:9652/64", true}, + {"2001:cdba:0000:0000:0000:0000:3257:9652/256", false}, + {"2001:cdba:0:0:0:0:3257:9652/32", true}, + {"2001:cdba::3257:9652/16", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "cidrv6") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d cidrv6 failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d cidrv6 failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "cidrv6" { + t.Fatalf("Index: %d cidrv6 failed Error: %s", i, errs) + } + } + } + } +} + +func TestCIDRv4Validation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"10.0.0.0/0", true}, + {"10.0.0.1/8", true}, + {"172.16.0.1/16", true}, + {"192.168.0.1/24", true}, + {"192.168.255.254/24", true}, + {"192.168.255.254/48", false}, + {"192.168.255.256/24", false}, + {"172.16.255.254/16", true}, + {"172.16.256.255/16", false}, + {"2001:cdba:0000:0000:0000:0000:3257:9652/64", false}, + {"2001:cdba:0000:0000:0000:0000:3257:9652/256", false}, + {"2001:cdba:0:0:0:0:3257:9652/32", false}, + {"2001:cdba::3257:9652/16", false}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "cidrv4") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d cidrv4 failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d cidrv4 failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "cidrv4" { + t.Fatalf("Index: %d cidrv4 failed Error: %s", i, errs) + } + } + } + } +} + +func TestTCPAddrValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {":80", false}, + {"127.0.0.1:80", true}, + {"[::1]:80", true}, + {"256.0.0.0:1", false}, + {"[::1]", false}, + } + + for i, test := range tests { + errs := validate.Field(test.param, "tcp_addr") + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d tcp_addr failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d tcp_addr failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "tcp_addr" { + t.Fatalf("Index: %d tcp_addr 
failed Error: %s", i, errs) + } + } + } + } +} + +func TestTCP6AddrValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {":80", false}, + {"127.0.0.1:80", false}, + {"[::1]:80", true}, + {"256.0.0.0:1", false}, + {"[::1]", false}, + } + + for i, test := range tests { + errs := validate.Field(test.param, "tcp6_addr") + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d tcp6_addr failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d tcp6_addr failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "tcp6_addr" { + t.Fatalf("Index: %d tcp6_addr failed Error: %s", i, errs) + } + } + } + } +} + +func TestTCP4AddrValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {":80", false}, + {"127.0.0.1:80", true}, + {"[::1]:80", false}, // https://github.com/golang/go/issues/14037 + {"256.0.0.0:1", false}, + {"[::1]", false}, + } + + for i, test := range tests { + errs := validate.Field(test.param, "tcp4_addr") + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d tcp4_addr failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Log(test.param, IsEqual(errs, nil)) + t.Fatalf("Index: %d tcp4_addr failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "tcp4_addr" { + t.Fatalf("Index: %d tcp4_addr failed Error: %s", i, errs) + } + } + } + } +} + +func TestUDPAddrValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {":80", false}, + {"127.0.0.1:80", true}, + {"[::1]:80", true}, + {"256.0.0.0:1", false}, + {"[::1]", false}, + } + + for i, test := range tests { + errs := validate.Field(test.param, "udp_addr") + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d udp_addr failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d udp_addr failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "udp_addr" { + t.Fatalf("Index: %d udp_addr failed Error: %s", i, errs) + } + } + } + } +} + +func TestUDP6AddrValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {":80", false}, + {"127.0.0.1:80", false}, + {"[::1]:80", true}, + {"256.0.0.0:1", false}, + {"[::1]", false}, + } + + for i, test := range tests { + errs := validate.Field(test.param, "udp6_addr") + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d udp6_addr failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d udp6_addr failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "udp6_addr" { + t.Fatalf("Index: %d udp6_addr failed Error: %s", i, errs) + } + } + } + } +} + +func TestUDP4AddrValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {":80", false}, + {"127.0.0.1:80", true}, + {"[::1]:80", false}, // https://github.com/golang/go/issues/14037 + {"256.0.0.0:1", false}, + {"[::1]", false}, + } + + for i, test := range tests { + errs := validate.Field(test.param, "udp4_addr") + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d udp4_addr failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Log(test.param, IsEqual(errs, nil)) + t.Fatalf("Index: %d udp4_addr failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != 
"udp4_addr" { + t.Fatalf("Index: %d udp4_addr failed Error: %s", i, errs) + } + } + } + } +} + +func TestIPAddrValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"127.0.0.1", true}, + {"127.0.0.1:80", false}, + {"::1", true}, + {"256.0.0.0", false}, + {"localhost", false}, + } + + for i, test := range tests { + errs := validate.Field(test.param, "ip_addr") + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d ip_addr failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d ip_addr failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "ip_addr" { + t.Fatalf("Index: %d ip_addr failed Error: %s", i, errs) + } + } + } + } +} + +func TestIP6AddrValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"127.0.0.1", false}, // https://github.com/golang/go/issues/14037 + {"127.0.0.1:80", false}, + {"::1", true}, + {"0:0:0:0:0:0:0:1", true}, + {"256.0.0.0", false}, + } + + for i, test := range tests { + errs := validate.Field(test.param, "ip6_addr") + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d ip6_addr failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d ip6_addr failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "ip6_addr" { + t.Fatalf("Index: %d ip6_addr failed Error: %s", i, errs) + } + } + } + } +} + +func TestIP4AddrValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"127.0.0.1", true}, + {"127.0.0.1:80", false}, + {"::1", false}, // https://github.com/golang/go/issues/14037 + {"256.0.0.0", false}, + {"localhost", false}, + } + + for i, test := range tests { + errs := validate.Field(test.param, "ip4_addr") + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d ip4_addr failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Log(test.param, IsEqual(errs, nil)) + t.Fatalf("Index: %d ip4_addr failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "ip4_addr" { + t.Fatalf("Index: %d ip4_addr failed Error: %s", i, errs) + } + } + } + } +} + +func TestUnixAddrValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", true}, + {"v.sock", true}, + } + + for i, test := range tests { + errs := validate.Field(test.param, "unix_addr") + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d unix_addr failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Log(test.param, IsEqual(errs, nil)) + t.Fatalf("Index: %d unix_addr failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "unix_addr" { + t.Fatalf("Index: %d unix_addr failed Error: %s", i, errs) + } + } + } + } +} + +func TestSliceMapArrayChanFuncPtrInterfaceRequiredValidation(t *testing.T) { + + var m map[string]string + + errs := validate.Field(m, "required") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "required") + + m = map[string]string{} + errs = validate.Field(m, "required") + Equal(t, errs, nil) + + var arr [5]string + errs = validate.Field(arr, "required") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "required") + + arr[0] = "ok" + errs = validate.Field(arr, "required") + Equal(t, errs, nil) + + var s []string + errs = validate.Field(s, "required") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "required") + + s = 
[]string{} + errs = validate.Field(s, "required") + Equal(t, errs, nil) + + var c chan string + errs = validate.Field(c, "required") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "required") + + c = make(chan string) + errs = validate.Field(c, "required") + Equal(t, errs, nil) + + var tst *int + errs = validate.Field(tst, "required") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "required") + + one := 1 + tst = &one + errs = validate.Field(tst, "required") + Equal(t, errs, nil) + + var iface interface{} + + errs = validate.Field(iface, "required") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "required") + + errs = validate.Field(iface, "omitempty,required") + Equal(t, errs, nil) + + errs = validate.Field(iface, "") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(nil, iface, "") + Equal(t, errs, nil) + + var f func(string) + + errs = validate.Field(f, "required") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "required") + + f = func(name string) {} + + errs = validate.Field(f, "required") + Equal(t, errs, nil) +} + +func TestDatePtrValidationIssueValidation(t *testing.T) { + + type Test struct { + LastViewed *time.Time + Reminder *time.Time + } + + test := &Test{} + + errs := validate.Struct(test) + Equal(t, errs, nil) +} + +func TestCommaAndPipeObfuscationValidation(t *testing.T) { + s := "My Name Is, |joeybloggs|" + + errs := validate.Field(s, "excludesall=0x2C") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "excludesall") + + errs = validate.Field(s, "excludesall=0x7C") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "excludesall") +} + +func TestBadKeyValidation(t *testing.T) { + type Test struct { + Name string `validate:"required, "` + } + + tst := &Test{ + Name: "test", + } + + PanicMatches(t, func() { validate.Struct(tst) }, "Undefined validation function on field Name") + + type Test2 struct { + Name string `validate:"required,,len=2"` + } + + tst2 := &Test2{ + Name: "test", + } + + PanicMatches(t, func() { validate.Struct(tst2) }, "Invalid validation tag on field Name") +} + +func TestInterfaceErrValidation(t *testing.T) { + + var v1 interface{} + var v2 interface{} + + v2 = 1 + v1 = v2 + + errs := validate.Field(v1, "len=1") + Equal(t, errs, nil) + + errs = validate.Field(v2, "len=1") + Equal(t, errs, nil) + + type ExternalCMD struct { + Userid string `json:"userid"` + Action uint32 `json:"action"` + Data interface{} `json:"data,omitempty" validate:"required"` + } + + s := &ExternalCMD{ + Userid: "123456", + Action: 10000, + // Data: 1, + } + + errs = validate.Struct(s) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "ExternalCMD.Data", "Data", "required") + + type ExternalCMD2 struct { + Userid string `json:"userid"` + Action uint32 `json:"action"` + Data interface{} `json:"data,omitempty" validate:"len=1"` + } + + s2 := &ExternalCMD2{ + Userid: "123456", + Action: 10000, + // Data: 1, + } + + errs = validate.Struct(s2) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "ExternalCMD2.Data", "Data", "len") + + s3 := &ExternalCMD2{ + Userid: "123456", + Action: 10000, + Data: 2, + } + + errs = validate.Struct(s3) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "ExternalCMD2.Data", "Data", "len") + + type Inner struct { + Name string `validate:"required"` + } + + inner := &Inner{ + Name: "", + } + + s4 := &ExternalCMD{ + Userid: "123456", + Action: 10000, + Data: inner, + } + + errs = 
validate.Struct(s4) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "ExternalCMD.Data.Name", "Name", "required") + + type TestMapStructPtr struct { + Errs map[int]interface{} `validate:"gt=0,dive,len=2"` + } + + mip := map[int]interface{}{0: &Inner{"ok"}, 3: nil, 4: &Inner{"ok"}} + + msp := &TestMapStructPtr{ + Errs: mip, + } + + errs = validate.Struct(msp) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "TestMapStructPtr.Errs[3]", "Errs[3]", "len") + + type TestMultiDimensionalStructs struct { + Errs [][]interface{} `validate:"gt=0,dive,dive"` + } + + var errStructArray [][]interface{} + + errStructArray = append(errStructArray, []interface{}{&Inner{"ok"}, &Inner{""}, &Inner{""}}) + errStructArray = append(errStructArray, []interface{}{&Inner{"ok"}, &Inner{""}, &Inner{""}}) + + tms := &TestMultiDimensionalStructs{ + Errs: errStructArray, + } + + errs = validate.Struct(tms) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 4) + AssertError(t, errs, "TestMultiDimensionalStructs.Errs[0][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructs.Errs[0][2].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructs.Errs[1][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructs.Errs[1][2].Name", "Name", "required") + + type TestMultiDimensionalStructsPtr2 struct { + Errs [][]*Inner `validate:"gt=0,dive,dive,required"` + } + + var errStructPtr2Array [][]*Inner + + errStructPtr2Array = append(errStructPtr2Array, []*Inner{{"ok"}, {""}, {""}}) + errStructPtr2Array = append(errStructPtr2Array, []*Inner{{"ok"}, {""}, {""}}) + errStructPtr2Array = append(errStructPtr2Array, []*Inner{{"ok"}, {""}, nil}) + + tmsp2 := &TestMultiDimensionalStructsPtr2{ + Errs: errStructPtr2Array, + } + + errs = validate.Struct(tmsp2) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 6) + AssertError(t, errs, "TestMultiDimensionalStructsPtr2.Errs[0][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr2.Errs[0][2].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr2.Errs[1][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr2.Errs[1][2].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr2.Errs[2][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr2.Errs[2][2]", "Errs[2][2]", "required") + + m := map[int]interface{}{0: "ok", 3: "", 4: "ok"} + + errs = validate.Field(m, "len=3,dive,len=2") + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "[3]", "[3]", "len") + + errs = validate.Field(m, "len=2,dive,required") + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "", "", "len") + + arr := []interface{}{"ok", "", "ok"} + + errs = validate.Field(arr, "len=3,dive,len=2") + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "[1]", "[1]", "len") + + errs = validate.Field(arr, "len=2,dive,required") + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "", "", "len") + + type MyStruct struct { + A, B string + C interface{} + } + + var a MyStruct + + a.A = "value" + a.C = "nu" + + errs = validate.Struct(a) + Equal(t, errs, nil) +} + +func TestMapDiveValidation(t *testing.T) { + + n := map[int]interface{}{0: nil} + errs := 
validate.Field(n, "omitempty,required") + Equal(t, errs, nil) + + m := map[int]string{0: "ok", 3: "", 4: "ok"} + + errs = validate.Field(m, "len=3,dive,required") + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "[3]", "[3]", "required") + + errs = validate.Field(m, "len=2,dive,required") + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "", "", "len") + + type Inner struct { + Name string `validate:"required"` + } + + type TestMapStruct struct { + Errs map[int]Inner `validate:"gt=0,dive"` + } + + mi := map[int]Inner{0: {"ok"}, 3: {""}, 4: {"ok"}} + + ms := &TestMapStruct{ + Errs: mi, + } + + errs = validate.Struct(ms) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "TestMapStruct.Errs[3].Name", "Name", "required") + + // for full test coverage + s := fmt.Sprint(errs.Error()) + NotEqual(t, s, "") + + type TestMapTimeStruct struct { + Errs map[int]*time.Time `validate:"gt=0,dive,required"` + } + + t1 := time.Now().UTC() + + mta := map[int]*time.Time{0: &t1, 3: nil, 4: nil} + + mt := &TestMapTimeStruct{ + Errs: mta, + } + + errs = validate.Struct(mt) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 2) + AssertError(t, errs, "TestMapTimeStruct.Errs[3]", "Errs[3]", "required") + AssertError(t, errs, "TestMapTimeStruct.Errs[4]", "Errs[4]", "required") + + type TestMapStructPtr struct { + Errs map[int]*Inner `validate:"gt=0,dive,required"` + } + + mip := map[int]*Inner{0: {"ok"}, 3: nil, 4: {"ok"}} + + msp := &TestMapStructPtr{ + Errs: mip, + } + + errs = validate.Struct(msp) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "TestMapStructPtr.Errs[3]", "Errs[3]", "required") + + type TestMapStructPtr2 struct { + Errs map[int]*Inner `validate:"gt=0,dive,omitempty,required"` + } + + mip2 := map[int]*Inner{0: {"ok"}, 3: nil, 4: {"ok"}} + + msp2 := &TestMapStructPtr2{ + Errs: mip2, + } + + errs = validate.Struct(msp2) + Equal(t, errs, nil) +} + +func TestArrayDiveValidation(t *testing.T) { + + arr := []string{"ok", "", "ok"} + + errs := validate.Field(arr, "len=3,dive,required") + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "[1]", "[1]", "required") + + errs = validate.Field(arr, "len=2,dive,required") + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "", "", "len") + + type BadDive struct { + Name string `validate:"dive"` + } + + bd := &BadDive{ + Name: "TEST", + } + + PanicMatches(t, func() { validate.Struct(bd) }, "dive error! 
can't dive on a non slice or map") + + type Test struct { + Errs []string `validate:"gt=0,dive,required"` + } + + test := &Test{ + Errs: []string{"ok", "", "ok"}, + } + + errs = validate.Struct(test) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "Test.Errs[1]", "Errs[1]", "required") + + test = &Test{ + Errs: []string{"ok", "ok", ""}, + } + + errs = validate.Struct(test) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 1) + AssertError(t, errs, "Test.Errs[2]", "Errs[2]", "required") + + type TestMultiDimensional struct { + Errs [][]string `validate:"gt=0,dive,dive,required"` + } + + var errArray [][]string + + errArray = append(errArray, []string{"ok", "", ""}) + errArray = append(errArray, []string{"ok", "", ""}) + + tm := &TestMultiDimensional{ + Errs: errArray, + } + + errs = validate.Struct(tm) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 4) + AssertError(t, errs, "TestMultiDimensional.Errs[0][1]", "Errs[0][1]", "required") + AssertError(t, errs, "TestMultiDimensional.Errs[0][2]", "Errs[0][2]", "required") + AssertError(t, errs, "TestMultiDimensional.Errs[1][1]", "Errs[1][1]", "required") + AssertError(t, errs, "TestMultiDimensional.Errs[1][2]", "Errs[1][2]", "required") + + type Inner struct { + Name string `validate:"required"` + } + + type TestMultiDimensionalStructs struct { + Errs [][]Inner `validate:"gt=0,dive,dive"` + } + + var errStructArray [][]Inner + + errStructArray = append(errStructArray, []Inner{{"ok"}, {""}, {""}}) + errStructArray = append(errStructArray, []Inner{{"ok"}, {""}, {""}}) + + tms := &TestMultiDimensionalStructs{ + Errs: errStructArray, + } + + errs = validate.Struct(tms) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 4) + AssertError(t, errs, "TestMultiDimensionalStructs.Errs[0][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructs.Errs[0][2].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructs.Errs[1][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructs.Errs[1][2].Name", "Name", "required") + + type TestMultiDimensionalStructsPtr struct { + Errs [][]*Inner `validate:"gt=0,dive,dive"` + } + + var errStructPtrArray [][]*Inner + + errStructPtrArray = append(errStructPtrArray, []*Inner{{"ok"}, {""}, {""}}) + errStructPtrArray = append(errStructPtrArray, []*Inner{{"ok"}, {""}, {""}}) + errStructPtrArray = append(errStructPtrArray, []*Inner{{"ok"}, {""}, nil}) + + tmsp := &TestMultiDimensionalStructsPtr{ + Errs: errStructPtrArray, + } + + errs = validate.Struct(tmsp) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 5) + AssertError(t, errs, "TestMultiDimensionalStructsPtr.Errs[0][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr.Errs[0][2].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr.Errs[1][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr.Errs[1][2].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr.Errs[2][1].Name", "Name", "required") + + // for full test coverage + s := fmt.Sprint(errs.Error()) + NotEqual(t, s, "") + + type TestMultiDimensionalStructsPtr2 struct { + Errs [][]*Inner `validate:"gt=0,dive,dive,required"` + } + + var errStructPtr2Array [][]*Inner + + errStructPtr2Array = append(errStructPtr2Array, []*Inner{{"ok"}, {""}, {""}}) + errStructPtr2Array = append(errStructPtr2Array, []*Inner{{"ok"}, {""}, {""}}) + 
errStructPtr2Array = append(errStructPtr2Array, []*Inner{{"ok"}, {""}, nil}) + + tmsp2 := &TestMultiDimensionalStructsPtr2{ + Errs: errStructPtr2Array, + } + + errs = validate.Struct(tmsp2) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 6) + AssertError(t, errs, "TestMultiDimensionalStructsPtr2.Errs[0][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr2.Errs[0][2].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr2.Errs[1][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr2.Errs[1][2].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr2.Errs[2][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr2.Errs[2][2]", "Errs[2][2]", "required") + + type TestMultiDimensionalStructsPtr3 struct { + Errs [][]*Inner `validate:"gt=0,dive,dive,omitempty"` + } + + var errStructPtr3Array [][]*Inner + + errStructPtr3Array = append(errStructPtr3Array, []*Inner{{"ok"}, {""}, {""}}) + errStructPtr3Array = append(errStructPtr3Array, []*Inner{{"ok"}, {""}, {""}}) + errStructPtr3Array = append(errStructPtr3Array, []*Inner{{"ok"}, {""}, nil}) + + tmsp3 := &TestMultiDimensionalStructsPtr3{ + Errs: errStructPtr3Array, + } + + errs = validate.Struct(tmsp3) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 5) + AssertError(t, errs, "TestMultiDimensionalStructsPtr3.Errs[0][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr3.Errs[0][2].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr3.Errs[1][1].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr3.Errs[1][2].Name", "Name", "required") + AssertError(t, errs, "TestMultiDimensionalStructsPtr3.Errs[2][1].Name", "Name", "required") + + type TestMultiDimensionalTimeTime struct { + Errs [][]*time.Time `validate:"gt=0,dive,dive,required"` + } + + var errTimePtr3Array [][]*time.Time + + t1 := time.Now().UTC() + t2 := time.Now().UTC() + t3 := time.Now().UTC().Add(time.Hour * 24) + + errTimePtr3Array = append(errTimePtr3Array, []*time.Time{&t1, &t2, &t3}) + errTimePtr3Array = append(errTimePtr3Array, []*time.Time{&t1, &t2, nil}) + errTimePtr3Array = append(errTimePtr3Array, []*time.Time{&t1, nil, nil}) + + tmtp3 := &TestMultiDimensionalTimeTime{ + Errs: errTimePtr3Array, + } + + errs = validate.Struct(tmtp3) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 3) + AssertError(t, errs, "TestMultiDimensionalTimeTime.Errs[1][2]", "Errs[1][2]", "required") + AssertError(t, errs, "TestMultiDimensionalTimeTime.Errs[2][1]", "Errs[2][1]", "required") + AssertError(t, errs, "TestMultiDimensionalTimeTime.Errs[2][2]", "Errs[2][2]", "required") + + type TestMultiDimensionalTimeTime2 struct { + Errs [][]*time.Time `validate:"gt=0,dive,dive,required"` + } + + var errTimeArray [][]*time.Time + + t1 = time.Now().UTC() + t2 = time.Now().UTC() + t3 = time.Now().UTC().Add(time.Hour * 24) + + errTimeArray = append(errTimeArray, []*time.Time{&t1, &t2, &t3}) + errTimeArray = append(errTimeArray, []*time.Time{&t1, &t2, nil}) + errTimeArray = append(errTimeArray, []*time.Time{&t1, nil, nil}) + + tmtp := &TestMultiDimensionalTimeTime2{ + Errs: errTimeArray, + } + + errs = validate.Struct(tmtp) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 3) + AssertError(t, errs, "TestMultiDimensionalTimeTime2.Errs[1][2]", "Errs[1][2]", "required") + AssertError(t, errs, 
"TestMultiDimensionalTimeTime2.Errs[2][1]", "Errs[2][1]", "required") + AssertError(t, errs, "TestMultiDimensionalTimeTime2.Errs[2][2]", "Errs[2][2]", "required") +} + +func TestNilStructPointerValidation(t *testing.T) { + type Inner struct { + Data string + } + + type Outer struct { + Inner *Inner `validate:"omitempty"` + } + + inner := &Inner{ + Data: "test", + } + + outer := &Outer{ + Inner: inner, + } + + errs := validate.Struct(outer) + Equal(t, errs, nil) + + outer = &Outer{ + Inner: nil, + } + + errs = validate.Struct(outer) + Equal(t, errs, nil) + + type Inner2 struct { + Data string + } + + type Outer2 struct { + Inner2 *Inner2 `validate:"required"` + } + + inner2 := &Inner2{ + Data: "test", + } + + outer2 := &Outer2{ + Inner2: inner2, + } + + errs = validate.Struct(outer2) + Equal(t, errs, nil) + + outer2 = &Outer2{ + Inner2: nil, + } + + errs = validate.Struct(outer2) + NotEqual(t, errs, nil) + AssertError(t, errs, "Outer2.Inner2", "Inner2", "required") + + type Inner3 struct { + Data string + } + + type Outer3 struct { + Inner3 *Inner3 + } + + inner3 := &Inner3{ + Data: "test", + } + + outer3 := &Outer3{ + Inner3: inner3, + } + + errs = validate.Struct(outer3) + Equal(t, errs, nil) + + type Inner4 struct { + Data string + } + + type Outer4 struct { + Inner4 *Inner4 `validate:"-"` + } + + inner4 := &Inner4{ + Data: "test", + } + + outer4 := &Outer4{ + Inner4: inner4, + } + + errs = validate.Struct(outer4) + Equal(t, errs, nil) +} + +func TestSSNValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"00-90-8787", false}, + {"66690-76", false}, + {"191 60 2869", true}, + {"191-60-2869", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "ssn") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d SSN failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d SSN failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "ssn" { + t.Fatalf("Index: %d Latitude failed Error: %s", i, errs) + } + } + } + } +} + +func TestLongitudeValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"-180.000", true}, + {"180.1", false}, + {"+73.234", true}, + {"+382.3811", false}, + {"23.11111111", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "longitude") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d Longitude failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d Longitude failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "longitude" { + t.Fatalf("Index: %d Latitude failed Error: %s", i, errs) + } + } + } + } +} + +func TestLatitudeValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"-90.000", true}, + {"+90", true}, + {"47.1231231", true}, + {"+99.9", false}, + {"108", false}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "latitude") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d Latitude failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d Latitude failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "latitude" { + t.Fatalf("Index: %d Latitude failed Error: %s", i, errs) + } + } + } + } +} + +func TestDataURIValidation(t *testing.T) { + tests := []struct { + param string 
+ expected bool + }{ + {"data:image/png;base64,TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4=", true}, + {"data:text/plain;base64,Vml2YW11cyBmZXJtZW50dW0gc2VtcGVyIHBvcnRhLg==", true}, + {"image/gif;base64,U3VzcGVuZGlzc2UgbGVjdHVzIGxlbw==", false}, + {"data:image/gif;base64,MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuMPNS1Ufof9EW/M98FNw" + + "UAKrwflsqVxaxQjBQnHQmiI7Vac40t8x7pIb8gLGV6wL7sBTJiPovJ0V7y7oc0Ye" + + "rhKh0Rm4skP2z/jHwwZICgGzBvA0rH8xlhUiTvcwDCJ0kc+fh35hNt8srZQM4619" + + "FTgB66Xmp4EtVyhpQV+t02g6NzK72oZI0vnAvqhpkxLeLiMCyrI416wHm5Tkukhx" + + "QmcL2a6hNOyu0ixX/x2kSFXApEnVrJ+/IxGyfyw8kf4N2IZpW5nEP847lpfj0SZZ" + + "Fwrd1mnfnDbYohX2zRptLy2ZUn06Qo9pkG5ntvFEPo9bfZeULtjYzIl6K8gJ2uGZ" + "HQIDAQAB", true}, + {"data:image/png;base64,12345", false}, + {"", false}, + {"data:text,:;base85,U3VzcGVuZGlzc2UgbGVjdHVzIGxlbw==", false}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "datauri") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d DataURI failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d DataURI failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "datauri" { + t.Fatalf("Index: %d DataURI failed Error: %s", i, errs) + } + } + } + } +} + +func TestMultibyteValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", true}, + {"abc", false}, + {"123", false}, + {"<>@;.-=", false}, + {"ひらがな・カタカナ、.漢字", true}, + {"あいうえお foobar", true}, + {"test@example.com", true}, + {"test@example.com", true}, + {"1234abcDExyz", true}, + {"カタカナ", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "multibyte") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d Multibyte failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d Multibyte failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "multibyte" { + t.Fatalf("Index: %d Multibyte failed Error: %s", i, errs) + } + } + } + } +} + +func TestPrintableASCIIValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", true}, + {"foobar", false}, + {"xyz098", false}, + {"123456", false}, + {"カタカナ", false}, + {"foobar", true}, + {"0987654321", true}, + {"test@example.com", true}, + {"1234abcDEF", true}, + {"newline\n", false}, + {"\x19test\x7F", false}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "printascii") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d Printable ASCII failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d Printable ASCII failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "printascii" { + t.Fatalf("Index: %d Printable ASCII failed Error: %s", i, errs) + } + } + } + } +} + +func TestASCIIValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", true}, + {"foobar", false}, + {"xyz098", false}, + {"123456", false}, + {"カタカナ", false}, + {"foobar", true}, + {"0987654321", true}, + {"test@example.com", true}, + {"1234abcDEF", true}, + {"", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "ascii") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d ASCII failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d ASCII failed Error: %s", i, errs) + } else { + 
val := errs.(ValidationErrors)[""] + if val.Tag != "ascii" { + t.Fatalf("Index: %d ASCII failed Error: %s", i, errs) + } + } + } + } +} + +func TestUUID5Validation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + + {"", false}, + {"xxxa987fbc9-4bed-3078-cf07-9141ba07c9f3", false}, + {"9c858901-8a57-4791-81fe-4c455b099bc9", false}, + {"a987fbc9-4bed-3078-cf07-9141ba07c9f3", false}, + {"987fbc97-4bed-5078-af07-9141ba07c9f3", true}, + {"987fbc97-4bed-5078-9f07-9141ba07c9f3", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "uuid5") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d UUID5 failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d UUID5 failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "uuid5" { + t.Fatalf("Index: %d UUID5 failed Error: %s", i, errs) + } + } + } + } +} + +func TestUUID4Validation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"xxxa987fbc9-4bed-3078-cf07-9141ba07c9f3", false}, + {"a987fbc9-4bed-5078-af07-9141ba07c9f3", false}, + {"934859", false}, + {"57b73598-8764-4ad0-a76a-679bb6640eb1", true}, + {"625e63f3-58f5-40b7-83a1-a72ad31acffb", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "uuid4") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d UUID4 failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d UUID4 failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "uuid4" { + t.Fatalf("Index: %d UUID4 failed Error: %s", i, errs) + } + } + } + } +} + +func TestUUID3Validation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"412452646", false}, + {"xxxa987fbc9-4bed-3078-cf07-9141ba07c9f3", false}, + {"a987fbc9-4bed-4078-8f07-9141ba07c9f3", false}, + {"a987fbc9-4bed-3078-cf07-9141ba07c9f3", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "uuid3") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d UUID3 failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d UUID3 failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "uuid3" { + t.Fatalf("Index: %d UUID3 failed Error: %s", i, errs) + } + } + } + } +} + +func TestUUIDValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"xxxa987fbc9-4bed-3078-cf07-9141ba07c9f3", false}, + {"a987fbc9-4bed-3078-cf07-9141ba07c9f3xxx", false}, + {"a987fbc94bed3078cf079141ba07c9f3", false}, + {"934859", false}, + {"987fbc9-4bed-3078-cf07a-9141ba07c9f3", false}, + {"aaaaaaaa-1111-1111-aaag-111111111111", false}, + {"a987fbc9-4bed-3078-cf07-9141ba07c9f3", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "uuid") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d UUID failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d UUID failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "uuid" { + t.Fatalf("Index: %d UUID failed Error: %s", i, errs) + } + } + } + } +} + +func TestISBNValidation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"foo", false}, + {"3836221195", true}, + {"1-61729-085-8", true}, + {"3 423 21412 0", true}, + {"3 
401 01319 X", true}, + {"9784873113685", true}, + {"978-4-87311-368-5", true}, + {"978 3401013190", true}, + {"978-3-8362-2119-1", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "isbn") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d ISBN failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d ISBN failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "isbn" { + t.Fatalf("Index: %d ISBN failed Error: %s", i, errs) + } + } + } + } +} + +func TestISBN13Validation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"foo", false}, + {"3-8362-2119-5", false}, + {"01234567890ab", false}, + {"978 3 8362 2119 0", false}, + {"9784873113685", true}, + {"978-4-87311-368-5", true}, + {"978 3401013190", true}, + {"978-3-8362-2119-1", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "isbn13") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d ISBN13 failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d ISBN13 failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "isbn13" { + t.Fatalf("Index: %d ISBN13 failed Error: %s", i, errs) + } + } + } + } +} + +func TestISBN10Validation(t *testing.T) { + tests := []struct { + param string + expected bool + }{ + {"", false}, + {"foo", false}, + {"3423214121", false}, + {"978-3836221191", false}, + {"3-423-21412-1", false}, + {"3 423 21412 1", false}, + {"3836221195", true}, + {"1-61729-085-8", true}, + {"3 423 21412 0", true}, + {"3 401 01319 X", true}, + } + + for i, test := range tests { + + errs := validate.Field(test.param, "isbn10") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d ISBN10 failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d ISBN10 failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "isbn10" { + t.Fatalf("Index: %d ISBN10 failed Error: %s", i, errs) + } + } + } + } +} + +func TestExcludesRuneValidation(t *testing.T) { + + tests := []struct { + Value string `validate:"excludesrune=☻"` + Tag string + ExpectedNil bool + }{ + {Value: "a☺b☻c☹d", Tag: "excludesrune=☻", ExpectedNil: false}, + {Value: "abcd", Tag: "excludesrune=☻", ExpectedNil: true}, + } + + for i, s := range tests { + errs := validate.Field(s.Value, s.Tag) + + if (s.ExpectedNil && errs != nil) || (!s.ExpectedNil && errs == nil) { + t.Fatalf("Index: %d failed Error: %s", i, errs) + } + + errs = validate.Struct(s) + + if (s.ExpectedNil && errs != nil) || (!s.ExpectedNil && errs == nil) { + t.Fatalf("Index: %d failed Error: %s", i, errs) + } + } +} + +func TestExcludesAllValidation(t *testing.T) { + + tests := []struct { + Value string `validate:"excludesall=@!{}[]"` + Tag string + ExpectedNil bool + }{ + {Value: "abcd@!jfk", Tag: "excludesall=@!{}[]", ExpectedNil: false}, + {Value: "abcdefg", Tag: "excludesall=@!{}[]", ExpectedNil: true}, + } + + for i, s := range tests { + errs := validate.Field(s.Value, s.Tag) + + if (s.ExpectedNil && errs != nil) || (!s.ExpectedNil && errs == nil) { + t.Fatalf("Index: %d failed Error: %s", i, errs) + } + + errs = validate.Struct(s) + + if (s.ExpectedNil && errs != nil) || (!s.ExpectedNil && errs == nil) { + t.Fatalf("Index: %d failed Error: %s", i, errs) + } + } + + username := "joeybloggs " + + errs := validate.Field(username, "excludesall=@ ") + 
NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "excludesall") + + excluded := "," + + errs = validate.Field(excluded, "excludesall=!@#$%^&*()_+.0x2C?") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "excludesall") + + excluded = "=" + + errs = validate.Field(excluded, "excludesall=!@#$%^&*()_+.0x2C=?") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "excludesall") +} + +func TestExcludesValidation(t *testing.T) { + + tests := []struct { + Value string `validate:"excludes=@"` + Tag string + ExpectedNil bool + }{ + {Value: "abcd@!jfk", Tag: "excludes=@", ExpectedNil: false}, + {Value: "abcdq!jfk", Tag: "excludes=@", ExpectedNil: true}, + } + + for i, s := range tests { + errs := validate.Field(s.Value, s.Tag) + + if (s.ExpectedNil && errs != nil) || (!s.ExpectedNil && errs == nil) { + t.Fatalf("Index: %d failed Error: %s", i, errs) + } + + errs = validate.Struct(s) + + if (s.ExpectedNil && errs != nil) || (!s.ExpectedNil && errs == nil) { + t.Fatalf("Index: %d failed Error: %s", i, errs) + } + } +} + +func TestContainsRuneValidation(t *testing.T) { + + tests := []struct { + Value string `validate:"containsrune=☻"` + Tag string + ExpectedNil bool + }{ + {Value: "a☺b☻c☹d", Tag: "containsrune=☻", ExpectedNil: true}, + {Value: "abcd", Tag: "containsrune=☻", ExpectedNil: false}, + } + + for i, s := range tests { + errs := validate.Field(s.Value, s.Tag) + + if (s.ExpectedNil && errs != nil) || (!s.ExpectedNil && errs == nil) { + t.Fatalf("Index: %d failed Error: %s", i, errs) + } + + errs = validate.Struct(s) + + if (s.ExpectedNil && errs != nil) || (!s.ExpectedNil && errs == nil) { + t.Fatalf("Index: %d failed Error: %s", i, errs) + } + } +} + +func TestContainsAnyValidation(t *testing.T) { + + tests := []struct { + Value string `validate:"containsany=@!{}[]"` + Tag string + ExpectedNil bool + }{ + {Value: "abcd@!jfk", Tag: "containsany=@!{}[]", ExpectedNil: true}, + {Value: "abcdefg", Tag: "containsany=@!{}[]", ExpectedNil: false}, + } + + for i, s := range tests { + errs := validate.Field(s.Value, s.Tag) + + if (s.ExpectedNil && errs != nil) || (!s.ExpectedNil && errs == nil) { + t.Fatalf("Index: %d failed Error: %s", i, errs) + } + + errs = validate.Struct(s) + + if (s.ExpectedNil && errs != nil) || (!s.ExpectedNil && errs == nil) { + t.Fatalf("Index: %d failed Error: %s", i, errs) + } + } +} + +func TestContainsValidation(t *testing.T) { + + tests := []struct { + Value string `validate:"contains=@"` + Tag string + ExpectedNil bool + }{ + {Value: "abcd@!jfk", Tag: "contains=@", ExpectedNil: true}, + {Value: "abcdq!jfk", Tag: "contains=@", ExpectedNil: false}, + } + + for i, s := range tests { + errs := validate.Field(s.Value, s.Tag) + + if (s.ExpectedNil && errs != nil) || (!s.ExpectedNil && errs == nil) { + t.Fatalf("Index: %d failed Error: %s", i, errs) + } + + errs = validate.Struct(s) + + if (s.ExpectedNil && errs != nil) || (!s.ExpectedNil && errs == nil) { + t.Fatalf("Index: %d failed Error: %s", i, errs) + } + } +} + +func TestIsNeFieldValidation(t *testing.T) { + + var j uint64 + var k float64 + s := "abcd" + i := 1 + j = 1 + k = 1.543 + arr := []string{"test"} + now := time.Now().UTC() + + var j2 uint64 + var k2 float64 + s2 := "abcdef" + i2 := 3 + j2 = 2 + k2 = 1.5434456 + arr2 := []string{"test", "test2"} + arr3 := []string{"test"} + now2 := now + + errs := validate.FieldWithValue(s, s2, "nefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(i2, i, "nefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(j2, j, "nefield") + Equal(t, 
errs, nil) + + errs = validate.FieldWithValue(k2, k, "nefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(arr2, arr, "nefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(now2, now, "nefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "nefield") + + errs = validate.FieldWithValue(arr3, arr, "nefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "nefield") + + type Test struct { + Start *time.Time `validate:"nefield=End"` + End *time.Time + } + + sv := &Test{ + Start: &now, + End: &now, + } + + errs = validate.Struct(sv) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.Start", "Start", "nefield") + + now3 := time.Now().UTC() + + sv = &Test{ + Start: &now, + End: &now3, + } + + errs = validate.Struct(sv) + Equal(t, errs, nil) + + errs = validate.FieldWithValue(nil, 1, "nefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(sv, now, "nefield") + Equal(t, errs, nil) + + type Test2 struct { + Start *time.Time `validate:"nefield=NonExistantField"` + End *time.Time + } + + sv2 := &Test2{ + Start: &now, + End: &now, + } + + errs = validate.Struct(sv2) + Equal(t, errs, nil) +} + +func TestIsNeValidation(t *testing.T) { + + var j uint64 + var k float64 + s := "abcdef" + i := 3 + j = 2 + k = 1.5434 + arr := []string{"test"} + now := time.Now().UTC() + + errs := validate.Field(s, "ne=abcd") + Equal(t, errs, nil) + + errs = validate.Field(i, "ne=1") + Equal(t, errs, nil) + + errs = validate.Field(j, "ne=1") + Equal(t, errs, nil) + + errs = validate.Field(k, "ne=1.543") + Equal(t, errs, nil) + + errs = validate.Field(arr, "ne=2") + Equal(t, errs, nil) + + errs = validate.Field(arr, "ne=1") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ne") + + PanicMatches(t, func() { validate.Field(now, "ne=now") }, "Bad field type time.Time") +} + +func TestIsEqFieldValidation(t *testing.T) { + + var j uint64 + var k float64 + s := "abcd" + i := 1 + j = 1 + k = 1.543 + arr := []string{"test"} + now := time.Now().UTC() + + var j2 uint64 + var k2 float64 + s2 := "abcd" + i2 := 1 + j2 = 1 + k2 = 1.543 + arr2 := []string{"test"} + arr3 := []string{"test", "test2"} + now2 := now + + errs := validate.FieldWithValue(s, s2, "eqfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(i2, i, "eqfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(j2, j, "eqfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(k2, k, "eqfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(arr2, arr, "eqfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(now2, now, "eqfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(arr3, arr, "eqfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "eqfield") + + type Test struct { + Start *time.Time `validate:"eqfield=End"` + End *time.Time + } + + sv := &Test{ + Start: &now, + End: &now, + } + + errs = validate.Struct(sv) + Equal(t, errs, nil) + + now3 := time.Now().UTC() + + sv = &Test{ + Start: &now, + End: &now3, + } + + errs = validate.Struct(sv) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.Start", "Start", "eqfield") + + errs = validate.FieldWithValue(nil, 1, "eqfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "eqfield") + + channel := make(chan string) + errs = validate.FieldWithValue(5, channel, "eqfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "eqfield") + + errs = validate.FieldWithValue(5, now, "eqfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "eqfield") + + type Test2 struct { + 
Start *time.Time `validate:"eqfield=NonExistantField"` + End *time.Time + } + + sv2 := &Test2{ + Start: &now, + End: &now, + } + + errs = validate.Struct(sv2) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test2.Start", "Start", "eqfield") + + type Inner struct { + Name string + } + + type TStruct struct { + Inner *Inner + CreatedAt *time.Time `validate:"eqfield=Inner"` + } + + inner := &Inner{ + Name: "NAME", + } + + test := &TStruct{ + Inner: inner, + CreatedAt: &now, + } + + errs = validate.Struct(test) + NotEqual(t, errs, nil) + AssertError(t, errs, "TStruct.CreatedAt", "CreatedAt", "eqfield") +} + +func TestIsEqValidation(t *testing.T) { + + var j uint64 + var k float64 + s := "abcd" + i := 1 + j = 1 + k = 1.543 + arr := []string{"test"} + now := time.Now().UTC() + + errs := validate.Field(s, "eq=abcd") + Equal(t, errs, nil) + + errs = validate.Field(i, "eq=1") + Equal(t, errs, nil) + + errs = validate.Field(j, "eq=1") + Equal(t, errs, nil) + + errs = validate.Field(k, "eq=1.543") + Equal(t, errs, nil) + + errs = validate.Field(arr, "eq=1") + Equal(t, errs, nil) + + errs = validate.Field(arr, "eq=2") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "eq") + + PanicMatches(t, func() { validate.Field(now, "eq=now") }, "Bad field type time.Time") +} + +func TestBase64Validation(t *testing.T) { + + s := "dW5pY29ybg==" + + errs := validate.Field(s, "base64") + Equal(t, errs, nil) + + s = "dGhpIGlzIGEgdGVzdCBiYXNlNjQ=" + errs = validate.Field(s, "base64") + Equal(t, errs, nil) + + s = "" + errs = validate.Field(s, "base64") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "base64") + + s = "dW5pY29ybg== foo bar" + errs = validate.Field(s, "base64") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "base64") +} + +func TestNoStructLevelValidation(t *testing.T) { + + type Inner struct { + Test string `validate:"len=5"` + } + + type Outer struct { + InnerStruct *Inner `validate:"required,nostructlevel"` + } + + outer := &Outer{ + InnerStruct: nil, + } + + errs := validate.Struct(outer) + NotEqual(t, errs, nil) + AssertError(t, errs, "Outer.InnerStruct", "InnerStruct", "required") + + inner := &Inner{ + Test: "1234", + } + + outer = &Outer{ + InnerStruct: inner, + } + + errs = validate.Struct(outer) + Equal(t, errs, nil) +} + +func TestStructOnlyValidation(t *testing.T) { + + type Inner struct { + Test string `validate:"len=5"` + } + + type Outer struct { + InnerStruct *Inner `validate:"required,structonly"` + } + + outer := &Outer{ + InnerStruct: nil, + } + + errs := validate.Struct(outer) + NotEqual(t, errs, nil) + AssertError(t, errs, "Outer.InnerStruct", "InnerStruct", "required") + + inner := &Inner{ + Test: "1234", + } + + outer = &Outer{ + InnerStruct: inner, + } + + errs = validate.Struct(outer) + Equal(t, errs, nil) +} + +func TestGtField(t *testing.T) { + + type TimeTest struct { + Start *time.Time `validate:"required,gt"` + End *time.Time `validate:"required,gt,gtfield=Start"` + } + + now := time.Now() + start := now.Add(time.Hour * 24) + end := start.Add(time.Hour * 24) + + timeTest := &TimeTest{ + Start: &start, + End: &end, + } + + errs := validate.Struct(timeTest) + Equal(t, errs, nil) + + timeTest = &TimeTest{ + Start: &end, + End: &start, + } + + errs = validate.Struct(timeTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "TimeTest.End", "End", "gtfield") + + errs = validate.FieldWithValue(&start, &end, "gtfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(&end, &start, "gtfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", 
"gtfield") + + errs = validate.FieldWithValue(&timeTest, &end, "gtfield") + NotEqual(t, errs, nil) + + errs = validate.FieldWithValue("test", "test bigger", "gtfield") + Equal(t, errs, nil) + + type IntTest struct { + Val1 int `validate:"required"` + Val2 int `validate:"required,gtfield=Val1"` + } + + intTest := &IntTest{ + Val1: 1, + Val2: 5, + } + + errs = validate.Struct(intTest) + Equal(t, errs, nil) + + intTest = &IntTest{ + Val1: 5, + Val2: 1, + } + + errs = validate.Struct(intTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "IntTest.Val2", "Val2", "gtfield") + + errs = validate.FieldWithValue(int(1), int(5), "gtfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(int(5), int(1), "gtfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtfield") + + type UIntTest struct { + Val1 uint `validate:"required"` + Val2 uint `validate:"required,gtfield=Val1"` + } + + uIntTest := &UIntTest{ + Val1: 1, + Val2: 5, + } + + errs = validate.Struct(uIntTest) + Equal(t, errs, nil) + + uIntTest = &UIntTest{ + Val1: 5, + Val2: 1, + } + + errs = validate.Struct(uIntTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "UIntTest.Val2", "Val2", "gtfield") + + errs = validate.FieldWithValue(uint(1), uint(5), "gtfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(uint(5), uint(1), "gtfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtfield") + + type FloatTest struct { + Val1 float64 `validate:"required"` + Val2 float64 `validate:"required,gtfield=Val1"` + } + + floatTest := &FloatTest{ + Val1: 1, + Val2: 5, + } + + errs = validate.Struct(floatTest) + Equal(t, errs, nil) + + floatTest = &FloatTest{ + Val1: 5, + Val2: 1, + } + + errs = validate.Struct(floatTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "FloatTest.Val2", "Val2", "gtfield") + + errs = validate.FieldWithValue(float32(1), float32(5), "gtfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(float32(5), float32(1), "gtfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtfield") + + errs = validate.FieldWithValue(nil, 1, "gtfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtfield") + + errs = validate.FieldWithValue(5, "T", "gtfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtfield") + + errs = validate.FieldWithValue(5, start, "gtfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtfield") + + type TimeTest2 struct { + Start *time.Time `validate:"required"` + End *time.Time `validate:"required,gtfield=NonExistantField"` + } + + timeTest2 := &TimeTest2{ + Start: &start, + End: &end, + } + + errs = validate.Struct(timeTest2) + NotEqual(t, errs, nil) + AssertError(t, errs, "TimeTest2.End", "End", "gtfield") +} + +func TestLtField(t *testing.T) { + + type TimeTest struct { + Start *time.Time `validate:"required,lt,ltfield=End"` + End *time.Time `validate:"required,lt"` + } + + now := time.Now() + start := now.Add(time.Hour * 24 * -1 * 2) + end := start.Add(time.Hour * 24) + + timeTest := &TimeTest{ + Start: &start, + End: &end, + } + + errs := validate.Struct(timeTest) + Equal(t, errs, nil) + + timeTest = &TimeTest{ + Start: &end, + End: &start, + } + + errs = validate.Struct(timeTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "TimeTest.Start", "Start", "ltfield") + + errs = validate.FieldWithValue(&end, &start, "ltfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(&start, &end, "ltfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltfield") + + errs = 
validate.FieldWithValue(timeTest, &end, "ltfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltfield") + + errs = validate.FieldWithValue("test", "tes", "ltfield") + Equal(t, errs, nil) + + type IntTest struct { + Val1 int `validate:"required"` + Val2 int `validate:"required,ltfield=Val1"` + } + + intTest := &IntTest{ + Val1: 5, + Val2: 1, + } + + errs = validate.Struct(intTest) + Equal(t, errs, nil) + + intTest = &IntTest{ + Val1: 1, + Val2: 5, + } + + errs = validate.Struct(intTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "IntTest.Val2", "Val2", "ltfield") + + errs = validate.FieldWithValue(int(5), int(1), "ltfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(int(1), int(5), "ltfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltfield") + + type UIntTest struct { + Val1 uint `validate:"required"` + Val2 uint `validate:"required,ltfield=Val1"` + } + + uIntTest := &UIntTest{ + Val1: 5, + Val2: 1, + } + + errs = validate.Struct(uIntTest) + Equal(t, errs, nil) + + uIntTest = &UIntTest{ + Val1: 1, + Val2: 5, + } + + errs = validate.Struct(uIntTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "UIntTest.Val2", "Val2", "ltfield") + + errs = validate.FieldWithValue(uint(5), uint(1), "ltfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(uint(1), uint(5), "ltfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltfield") + + type FloatTest struct { + Val1 float64 `validate:"required"` + Val2 float64 `validate:"required,ltfield=Val1"` + } + + floatTest := &FloatTest{ + Val1: 5, + Val2: 1, + } + + errs = validate.Struct(floatTest) + Equal(t, errs, nil) + + floatTest = &FloatTest{ + Val1: 1, + Val2: 5, + } + + errs = validate.Struct(floatTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "FloatTest.Val2", "Val2", "ltfield") + + errs = validate.FieldWithValue(float32(5), float32(1), "ltfield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(float32(1), float32(5), "ltfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltfield") + + errs = validate.FieldWithValue(nil, 5, "ltfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltfield") + + errs = validate.FieldWithValue(1, "T", "ltfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltfield") + + errs = validate.FieldWithValue(1, end, "ltfield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltfield") + + type TimeTest2 struct { + Start *time.Time `validate:"required"` + End *time.Time `validate:"required,ltfield=NonExistantField"` + } + + timeTest2 := &TimeTest2{ + Start: &end, + End: &start, + } + + errs = validate.Struct(timeTest2) + NotEqual(t, errs, nil) + AssertError(t, errs, "TimeTest2.End", "End", "ltfield") +} + +func TestLteField(t *testing.T) { + + type TimeTest struct { + Start *time.Time `validate:"required,lte,ltefield=End"` + End *time.Time `validate:"required,lte"` + } + + now := time.Now() + start := now.Add(time.Hour * 24 * -1 * 2) + end := start.Add(time.Hour * 24) + + timeTest := &TimeTest{ + Start: &start, + End: &end, + } + + errs := validate.Struct(timeTest) + Equal(t, errs, nil) + + timeTest = &TimeTest{ + Start: &end, + End: &start, + } + + errs = validate.Struct(timeTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "TimeTest.Start", "Start", "ltefield") + + errs = validate.FieldWithValue(&end, &start, "ltefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(&start, &end, "ltefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltefield") + + errs = 
validate.FieldWithValue(timeTest, &end, "ltefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltefield") + + errs = validate.FieldWithValue("test", "tes", "ltefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue("test", "test", "ltefield") + Equal(t, errs, nil) + + type IntTest struct { + Val1 int `validate:"required"` + Val2 int `validate:"required,ltefield=Val1"` + } + + intTest := &IntTest{ + Val1: 5, + Val2: 1, + } + + errs = validate.Struct(intTest) + Equal(t, errs, nil) + + intTest = &IntTest{ + Val1: 1, + Val2: 5, + } + + errs = validate.Struct(intTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "IntTest.Val2", "Val2", "ltefield") + + errs = validate.FieldWithValue(int(5), int(1), "ltefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(int(1), int(5), "ltefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltefield") + + type UIntTest struct { + Val1 uint `validate:"required"` + Val2 uint `validate:"required,ltefield=Val1"` + } + + uIntTest := &UIntTest{ + Val1: 5, + Val2: 1, + } + + errs = validate.Struct(uIntTest) + Equal(t, errs, nil) + + uIntTest = &UIntTest{ + Val1: 1, + Val2: 5, + } + + errs = validate.Struct(uIntTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "UIntTest.Val2", "Val2", "ltefield") + + errs = validate.FieldWithValue(uint(5), uint(1), "ltefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(uint(1), uint(5), "ltefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltefield") + + type FloatTest struct { + Val1 float64 `validate:"required"` + Val2 float64 `validate:"required,ltefield=Val1"` + } + + floatTest := &FloatTest{ + Val1: 5, + Val2: 1, + } + + errs = validate.Struct(floatTest) + Equal(t, errs, nil) + + floatTest = &FloatTest{ + Val1: 1, + Val2: 5, + } + + errs = validate.Struct(floatTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "FloatTest.Val2", "Val2", "ltefield") + + errs = validate.FieldWithValue(float32(5), float32(1), "ltefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(float32(1), float32(5), "ltefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltefield") + + errs = validate.FieldWithValue(nil, 5, "ltefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltefield") + + errs = validate.FieldWithValue(1, "T", "ltefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltefield") + + errs = validate.FieldWithValue(1, end, "ltefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "ltefield") + + type TimeTest2 struct { + Start *time.Time `validate:"required"` + End *time.Time `validate:"required,ltefield=NonExistantField"` + } + + timeTest2 := &TimeTest2{ + Start: &end, + End: &start, + } + + errs = validate.Struct(timeTest2) + NotEqual(t, errs, nil) + AssertError(t, errs, "TimeTest2.End", "End", "ltefield") +} + +func TestGteField(t *testing.T) { + + type TimeTest struct { + Start *time.Time `validate:"required,gte"` + End *time.Time `validate:"required,gte,gtefield=Start"` + } + + now := time.Now() + start := now.Add(time.Hour * 24) + end := start.Add(time.Hour * 24) + + timeTest := &TimeTest{ + Start: &start, + End: &end, + } + + errs := validate.Struct(timeTest) + Equal(t, errs, nil) + + timeTest = &TimeTest{ + Start: &end, + End: &start, + } + + errs = validate.Struct(timeTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "TimeTest.End", "End", "gtefield") + + errs = validate.FieldWithValue(&start, &end, "gtefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(&end, &start, 
"gtefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtefield") + + errs = validate.FieldWithValue(timeTest, &start, "gtefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtefield") + + errs = validate.FieldWithValue("test", "test", "gtefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue("test", "test bigger", "gtefield") + Equal(t, errs, nil) + + type IntTest struct { + Val1 int `validate:"required"` + Val2 int `validate:"required,gtefield=Val1"` + } + + intTest := &IntTest{ + Val1: 1, + Val2: 5, + } + + errs = validate.Struct(intTest) + Equal(t, errs, nil) + + intTest = &IntTest{ + Val1: 5, + Val2: 1, + } + + errs = validate.Struct(intTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "IntTest.Val2", "Val2", "gtefield") + + errs = validate.FieldWithValue(int(1), int(5), "gtefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(int(5), int(1), "gtefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtefield") + + type UIntTest struct { + Val1 uint `validate:"required"` + Val2 uint `validate:"required,gtefield=Val1"` + } + + uIntTest := &UIntTest{ + Val1: 1, + Val2: 5, + } + + errs = validate.Struct(uIntTest) + Equal(t, errs, nil) + + uIntTest = &UIntTest{ + Val1: 5, + Val2: 1, + } + + errs = validate.Struct(uIntTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "UIntTest.Val2", "Val2", "gtefield") + + errs = validate.FieldWithValue(uint(1), uint(5), "gtefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(uint(5), uint(1), "gtefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtefield") + + type FloatTest struct { + Val1 float64 `validate:"required"` + Val2 float64 `validate:"required,gtefield=Val1"` + } + + floatTest := &FloatTest{ + Val1: 1, + Val2: 5, + } + + errs = validate.Struct(floatTest) + Equal(t, errs, nil) + + floatTest = &FloatTest{ + Val1: 5, + Val2: 1, + } + + errs = validate.Struct(floatTest) + NotEqual(t, errs, nil) + AssertError(t, errs, "FloatTest.Val2", "Val2", "gtefield") + + errs = validate.FieldWithValue(float32(1), float32(5), "gtefield") + Equal(t, errs, nil) + + errs = validate.FieldWithValue(float32(5), float32(1), "gtefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtefield") + + errs = validate.FieldWithValue(nil, 1, "gtefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtefield") + + errs = validate.FieldWithValue(5, "T", "gtefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtefield") + + errs = validate.FieldWithValue(5, start, "gtefield") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gtefield") + + type TimeTest2 struct { + Start *time.Time `validate:"required"` + End *time.Time `validate:"required,gtefield=NonExistantField"` + } + + timeTest2 := &TimeTest2{ + Start: &start, + End: &end, + } + + errs = validate.Struct(timeTest2) + NotEqual(t, errs, nil) + AssertError(t, errs, "TimeTest2.End", "End", "gtefield") +} + +func TestValidateByTagAndValue(t *testing.T) { + + val := "test" + field := "test" + errs := validate.FieldWithValue(val, field, "required") + Equal(t, errs, nil) + + fn := func(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + return current.String() == field.String() + } + + validate.RegisterValidation("isequaltestfunc", fn) + + errs = validate.FieldWithValue(val, field, "isequaltestfunc") + Equal(t, errs, nil) + + val = "unequal" + + errs = validate.FieldWithValue(val, field, 
"isequaltestfunc") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "isequaltestfunc") +} + +func TestAddFunctions(t *testing.T) { + + fn := func(v *Validate, topStruct reflect.Value, currentStruct reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + + return true + } + + config := &Config{ + TagName: "validateme", + } + + validate := New(config) + + errs := validate.RegisterValidation("new", fn) + Equal(t, errs, nil) + + errs = validate.RegisterValidation("", fn) + NotEqual(t, errs, nil) + + validate.RegisterValidation("new", nil) + NotEqual(t, errs, nil) + + errs = validate.RegisterValidation("new", fn) + Equal(t, errs, nil) + + PanicMatches(t, func() { validate.RegisterValidation("dive", fn) }, "Tag 'dive' either contains restricted characters or is the same as a restricted tag needed for normal operation") +} + +func TestChangeTag(t *testing.T) { + + config := &Config{ + TagName: "val", + } + validate := New(config) + + type Test struct { + Name string `val:"len=4"` + } + s := &Test{ + Name: "TEST", + } + + errs := validate.Struct(s) + Equal(t, errs, nil) +} + +func TestUnexposedStruct(t *testing.T) { + + type Test struct { + Name string + unexposed struct { + A string `validate:"required"` + } + } + + s := &Test{ + Name: "TEST", + } + + errs := validate.Struct(s) + Equal(t, errs, nil) +} + +func TestBadParams(t *testing.T) { + + i := 1 + errs := validate.Field(i, "-") + Equal(t, errs, nil) + + PanicMatches(t, func() { validate.Field(i, "len=a") }, "strconv.ParseInt: parsing \"a\": invalid syntax") + PanicMatches(t, func() { validate.Field(i, "len=a") }, "strconv.ParseInt: parsing \"a\": invalid syntax") + + var ui uint = 1 + PanicMatches(t, func() { validate.Field(ui, "len=a") }, "strconv.ParseUint: parsing \"a\": invalid syntax") + + f := 1.23 + PanicMatches(t, func() { validate.Field(f, "len=a") }, "strconv.ParseFloat: parsing \"a\": invalid syntax") +} + +func TestLength(t *testing.T) { + + i := true + PanicMatches(t, func() { validate.Field(i, "len") }, "Bad field type bool") +} + +func TestIsGt(t *testing.T) { + + myMap := map[string]string{} + errs := validate.Field(myMap, "gt=0") + NotEqual(t, errs, nil) + + f := 1.23 + errs = validate.Field(f, "gt=5") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gt") + + var ui uint = 5 + errs = validate.Field(ui, "gt=10") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gt") + + i := true + PanicMatches(t, func() { validate.Field(i, "gt") }, "Bad field type bool") + + tm := time.Now().UTC() + tm = tm.Add(time.Hour * 24) + + errs = validate.Field(tm, "gt") + Equal(t, errs, nil) + + t2 := time.Now().UTC().Add(-time.Hour) + + errs = validate.Field(t2, "gt") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gt") + + type Test struct { + Now *time.Time `validate:"gt"` + } + s := &Test{ + Now: &tm, + } + + errs = validate.Struct(s) + Equal(t, errs, nil) + + s = &Test{ + Now: &t2, + } + + errs = validate.Struct(s) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.Now", "Now", "gt") +} + +func TestIsGte(t *testing.T) { + + i := true + PanicMatches(t, func() { validate.Field(i, "gte") }, "Bad field type bool") + + t1 := time.Now().UTC() + t1 = t1.Add(time.Hour * 24) + + errs := validate.Field(t1, "gte") + Equal(t, errs, nil) + + t2 := time.Now().UTC().Add(-time.Hour) + + errs = validate.Field(t2, "gte") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "gte") + + type Test struct { + Now *time.Time `validate:"gte"` + } + s := &Test{ + Now: &t1, + } + + 
errs = validate.Struct(s) + Equal(t, errs, nil) + + s = &Test{ + Now: &t2, + } + + errs = validate.Struct(s) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.Now", "Now", "gte") +} + +func TestIsLt(t *testing.T) { + + myMap := map[string]string{} + errs := validate.Field(myMap, "lt=0") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "lt") + + f := 1.23 + errs = validate.Field(f, "lt=0") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "lt") + + var ui uint = 5 + errs = validate.Field(ui, "lt=0") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "lt") + + i := true + PanicMatches(t, func() { validate.Field(i, "lt") }, "Bad field type bool") + + t1 := time.Now().UTC().Add(-time.Hour) + + errs = validate.Field(t1, "lt") + Equal(t, errs, nil) + + t2 := time.Now().UTC() + t2 = t2.Add(time.Hour * 24) + + errs = validate.Field(t2, "lt") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "lt") + + type Test struct { + Now *time.Time `validate:"lt"` + } + + s := &Test{ + Now: &t1, + } + + errs = validate.Struct(s) + Equal(t, errs, nil) + + s = &Test{ + Now: &t2, + } + + errs = validate.Struct(s) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.Now", "Now", "lt") +} + +func TestIsLte(t *testing.T) { + + i := true + PanicMatches(t, func() { validate.Field(i, "lte") }, "Bad field type bool") + + t1 := time.Now().UTC().Add(-time.Hour) + + errs := validate.Field(t1, "lte") + Equal(t, errs, nil) + + t2 := time.Now().UTC() + t2 = t2.Add(time.Hour * 24) + + errs = validate.Field(t2, "lte") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "lte") + + type Test struct { + Now *time.Time `validate:"lte"` + } + + s := &Test{ + Now: &t1, + } + + errs = validate.Struct(s) + Equal(t, errs, nil) + + s = &Test{ + Now: &t2, + } + + errs = validate.Struct(s) + NotEqual(t, errs, nil) +} + +func TestUrl(t *testing.T) { + + var tests = []struct { + param string + expected bool + }{ + {"http://foo.bar#com", true}, + {"http://foobar.com", true}, + {"https://foobar.com", true}, + {"foobar.com", false}, + {"http://foobar.coffee/", true}, + {"http://foobar.中文网/", true}, + {"http://foobar.org/", true}, + {"http://foobar.org:8080/", true}, + {"ftp://foobar.ru/", true}, + {"http://user:pass@www.foobar.com/", true}, + {"http://127.0.0.1/", true}, + {"http://duckduckgo.com/?q=%2F", true}, + {"http://localhost:3000/", true}, + {"http://foobar.com/?foo=bar#baz=qux", true}, + {"http://foobar.com?foo=bar", true}, + {"http://www.xn--froschgrn-x9a.net/", true}, + {"", false}, + {"xyz://foobar.com", true}, + {"invalid.", false}, + {".com", false}, + {"rtmp://foobar.com", true}, + {"http://www.foo_bar.com/", true}, + {"http://localhost:3000/", true}, + {"http://foobar.com/#baz", true}, + {"http://foobar.com#baz=qux", true}, + {"http://foobar.com/t$-_.+!*\\'(),", true}, + {"http://www.foobar.com/~foobar", true}, + {"http://www.-foobar.com/", true}, + {"http://www.foo---bar.com/", true}, + {"mailto:someone@example.com", true}, + {"irc://irc.server.org/channel", true}, + {"irc://#channel@network", true}, + {"/abs/test/dir", false}, + {"./rel/test/dir", false}, + } + for i, test := range tests { + + errs := validate.Field(test.param, "url") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d URL failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d URL failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "url" { + t.Fatalf("Index: %d URL failed Error: %s", i, errs) + } + } + } + } + + i := 1 + 
PanicMatches(t, func() { validate.Field(i, "url") }, "Bad field type int") +} + +func TestUri(t *testing.T) { + + var tests = []struct { + param string + expected bool + }{ + {"http://foo.bar#com", true}, + {"http://foobar.com", true}, + {"https://foobar.com", true}, + {"foobar.com", false}, + {"http://foobar.coffee/", true}, + {"http://foobar.中文网/", true}, + {"http://foobar.org/", true}, + {"http://foobar.org:8080/", true}, + {"ftp://foobar.ru/", true}, + {"http://user:pass@www.foobar.com/", true}, + {"http://127.0.0.1/", true}, + {"http://duckduckgo.com/?q=%2F", true}, + {"http://localhost:3000/", true}, + {"http://foobar.com/?foo=bar#baz=qux", true}, + {"http://foobar.com?foo=bar", true}, + {"http://www.xn--froschgrn-x9a.net/", true}, + {"", false}, + {"xyz://foobar.com", true}, + {"invalid.", false}, + {".com", false}, + {"rtmp://foobar.com", true}, + {"http://www.foo_bar.com/", true}, + {"http://localhost:3000/", true}, + {"http://foobar.com#baz=qux", true}, + {"http://foobar.com/t$-_.+!*\\'(),", true}, + {"http://www.foobar.com/~foobar", true}, + {"http://www.-foobar.com/", true}, + {"http://www.foo---bar.com/", true}, + {"mailto:someone@example.com", true}, + {"irc://irc.server.org/channel", true}, + {"irc://#channel@network", true}, + {"/abs/test/dir", true}, + {"./rel/test/dir", false}, + } + for i, test := range tests { + + errs := validate.Field(test.param, "uri") + + if test.expected { + if !IsEqual(errs, nil) { + t.Fatalf("Index: %d URI failed Error: %s", i, errs) + } + } else { + if IsEqual(errs, nil) { + t.Fatalf("Index: %d URI failed Error: %s", i, errs) + } else { + val := errs.(ValidationErrors)[""] + if val.Tag != "uri" { + t.Fatalf("Index: %d URI failed Error: %s", i, errs) + } + } + } + } + + i := 1 + PanicMatches(t, func() { validate.Field(i, "uri") }, "Bad field type int") +} + +func TestOrTag(t *testing.T) { + s := "rgba(0,31,255,0.5)" + errs := validate.Field(s, "rgb|rgba") + Equal(t, errs, nil) + + s = "rgba(0,31,255,0.5)" + errs = validate.Field(s, "rgb|rgba|len=18") + Equal(t, errs, nil) + + s = "this ain't right" + errs = validate.Field(s, "rgb|rgba") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "rgb|rgba") + + s = "this ain't right" + errs = validate.Field(s, "rgb|rgba|len=10") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "rgb|rgba|len") + + s = "this is right" + errs = validate.Field(s, "rgb|rgba|len=13") + Equal(t, errs, nil) + + s = "" + errs = validate.Field(s, "omitempty,rgb|rgba") + Equal(t, errs, nil) + + s = "this is right, but a blank or isn't" + + PanicMatches(t, func() { validate.Field(s, "rgb||len=13") }, "Invalid validation tag on field") + PanicMatches(t, func() { validate.Field(s, "rgb|rgbaa|len=13") }, "Undefined validation function on field") +} + +func TestHsla(t *testing.T) { + + s := "hsla(360,100%,100%,1)" + errs := validate.Field(s, "hsla") + Equal(t, errs, nil) + + s = "hsla(360,100%,100%,0.5)" + errs = validate.Field(s, "hsla") + Equal(t, errs, nil) + + s = "hsla(0,0%,0%, 0)" + errs = validate.Field(s, "hsla") + Equal(t, errs, nil) + + s = "hsl(361,100%,50%,1)" + errs = validate.Field(s, "hsla") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hsla") + + s = "hsl(361,100%,50%)" + errs = validate.Field(s, "hsla") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hsla") + + s = "hsla(361,100%,50%)" + errs = validate.Field(s, "hsla") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hsla") + + s = "hsla(360,101%,50%)" + errs = validate.Field(s, "hsla") + NotEqual(t, errs, nil) + AssertError(t, 
errs, "", "", "hsla") + + s = "hsla(360,100%,101%)" + errs = validate.Field(s, "hsla") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hsla") + + i := 1 + validate.Field(i, "hsla") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hsla") +} + +func TestHsl(t *testing.T) { + + s := "hsl(360,100%,50%)" + errs := validate.Field(s, "hsl") + Equal(t, errs, nil) + + s = "hsl(0,0%,0%)" + errs = validate.Field(s, "hsl") + Equal(t, errs, nil) + + s = "hsl(361,100%,50%)" + errs = validate.Field(s, "hsl") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hsl") + + s = "hsl(361,101%,50%)" + errs = validate.Field(s, "hsl") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hsl") + + s = "hsl(361,100%,101%)" + errs = validate.Field(s, "hsl") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hsl") + + s = "hsl(-10,100%,100%)" + errs = validate.Field(s, "hsl") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hsl") + + i := 1 + errs = validate.Field(i, "hsl") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hsl") +} + +func TestRgba(t *testing.T) { + + s := "rgba(0,31,255,0.5)" + errs := validate.Field(s, "rgba") + Equal(t, errs, nil) + + s = "rgba(0,31,255,0.12)" + errs = validate.Field(s, "rgba") + Equal(t, errs, nil) + + s = "rgba(12%,55%,100%,0.12)" + errs = validate.Field(s, "rgba") + Equal(t, errs, nil) + + s = "rgba( 0, 31, 255, 0.5)" + errs = validate.Field(s, "rgba") + Equal(t, errs, nil) + + s = "rgba(12%,55,100%,0.12)" + errs = validate.Field(s, "rgba") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "rgba") + + s = "rgb(0, 31, 255)" + errs = validate.Field(s, "rgba") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "rgba") + + s = "rgb(1,349,275,0.5)" + errs = validate.Field(s, "rgba") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "rgba") + + s = "rgb(01,31,255,0.5)" + errs = validate.Field(s, "rgba") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "rgba") + + i := 1 + errs = validate.Field(i, "rgba") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "rgba") +} + +func TestRgb(t *testing.T) { + + s := "rgb(0,31,255)" + errs := validate.Field(s, "rgb") + Equal(t, errs, nil) + + s = "rgb(0, 31, 255)" + errs = validate.Field(s, "rgb") + Equal(t, errs, nil) + + s = "rgb(10%, 50%, 100%)" + errs = validate.Field(s, "rgb") + Equal(t, errs, nil) + + s = "rgb(10%, 50%, 55)" + errs = validate.Field(s, "rgb") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "rgb") + + s = "rgb(1,349,275)" + errs = validate.Field(s, "rgb") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "rgb") + + s = "rgb(01,31,255)" + errs = validate.Field(s, "rgb") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "rgb") + + s = "rgba(0,31,255)" + errs = validate.Field(s, "rgb") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "rgb") + + i := 1 + errs = validate.Field(i, "rgb") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "rgb") +} + +func TestEmail(t *testing.T) { + + s := "test@mail.com" + errs := validate.Field(s, "email") + Equal(t, errs, nil) + + s = "Dörte@Sörensen.example.com" + errs = validate.Field(s, "email") + Equal(t, errs, nil) + + s = "θσερ@εχαμπλε.ψομ" + errs = validate.Field(s, "email") + Equal(t, errs, nil) + + s = "юзер@екзампл.ком" + errs = validate.Field(s, "email") + Equal(t, errs, nil) + + s = "उपयोगकर्ता@उदाहरण.कॉम" + errs = validate.Field(s, "email") + Equal(t, errs, nil) + + s = "用户@例子.广告" + errs = validate.Field(s, "email") + Equal(t, errs, nil) + + s = "" + errs = validate.Field(s, 
"email") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "email") + + s = "test@email" + errs = validate.Field(s, "email") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "email") + + s = "test@email." + errs = validate.Field(s, "email") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "email") + + s = "@email.com" + errs = validate.Field(s, "email") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "email") + + i := true + errs = validate.Field(i, "email") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "email") +} + +func TestHexColor(t *testing.T) { + + s := "#fff" + errs := validate.Field(s, "hexcolor") + Equal(t, errs, nil) + + s = "#c2c2c2" + errs = validate.Field(s, "hexcolor") + Equal(t, errs, nil) + + s = "fff" + errs = validate.Field(s, "hexcolor") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hexcolor") + + s = "fffFF" + errs = validate.Field(s, "hexcolor") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hexcolor") + + i := true + errs = validate.Field(i, "hexcolor") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hexcolor") +} + +func TestHexadecimal(t *testing.T) { + + s := "ff0044" + errs := validate.Field(s, "hexadecimal") + Equal(t, errs, nil) + + s = "abcdefg" + errs = validate.Field(s, "hexadecimal") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hexadecimal") + + i := true + errs = validate.Field(i, "hexadecimal") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "hexadecimal") +} + +func TestNumber(t *testing.T) { + + s := "1" + errs := validate.Field(s, "number") + Equal(t, errs, nil) + + s = "+1" + errs = validate.Field(s, "number") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "number") + + s = "-1" + errs = validate.Field(s, "number") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "number") + + s = "1.12" + errs = validate.Field(s, "number") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "number") + + s = "+1.12" + errs = validate.Field(s, "number") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "number") + + s = "-1.12" + errs = validate.Field(s, "number") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "number") + + s = "1." + errs = validate.Field(s, "number") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "number") + + s = "1.o" + errs = validate.Field(s, "number") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "number") + + i := 1 + errs = validate.Field(i, "number") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "number") +} + +func TestNumeric(t *testing.T) { + + s := "1" + errs := validate.Field(s, "numeric") + Equal(t, errs, nil) + + s = "+1" + errs = validate.Field(s, "numeric") + Equal(t, errs, nil) + + s = "-1" + errs = validate.Field(s, "numeric") + Equal(t, errs, nil) + + s = "1.12" + errs = validate.Field(s, "numeric") + Equal(t, errs, nil) + + s = "+1.12" + errs = validate.Field(s, "numeric") + Equal(t, errs, nil) + + s = "-1.12" + errs = validate.Field(s, "numeric") + Equal(t, errs, nil) + + s = "1." 
+ errs = validate.Field(s, "numeric") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "numeric") + + s = "1.o" + errs = validate.Field(s, "numeric") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "numeric") + + i := 1 + errs = validate.Field(i, "numeric") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "numeric") +} + +func TestAlphaNumeric(t *testing.T) { + + s := "abcd123" + errs := validate.Field(s, "alphanum") + Equal(t, errs, nil) + + s = "abc!23" + errs = validate.Field(s, "alphanum") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "alphanum") + + errs = validate.Field(1, "alphanum") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "alphanum") +} + +func TestAlpha(t *testing.T) { + + s := "abcd" + errs := validate.Field(s, "alpha") + Equal(t, errs, nil) + + s = "abc®" + errs = validate.Field(s, "alpha") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "alpha") + + s = "abc÷" + errs = validate.Field(s, "alpha") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "alpha") + + s = "abc1" + errs = validate.Field(s, "alpha") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "alpha") + + errs = validate.Field(1, "alpha") + NotEqual(t, errs, nil) + AssertError(t, errs, "", "", "alpha") + +} + +func TestStructStringValidation(t *testing.T) { + + tSuccess := &TestString{ + Required: "Required", + Len: "length==10", + Min: "min=1", + Max: "1234567890", + MinMax: "12345", + Lt: "012345678", + Lte: "0123456789", + Gt: "01234567890", + Gte: "0123456789", + OmitEmpty: "", + Sub: &SubTest{ + Test: "1", + }, + SubIgnore: &SubTest{ + Test: "", + }, + Anonymous: struct { + A string `validate:"required"` + }{ + A: "1", + }, + Iface: &Impl{ + F: "123", + }, + } + + errs := validate.Struct(tSuccess) + Equal(t, errs, nil) + + tFail := &TestString{ + Required: "", + Len: "", + Min: "", + Max: "12345678901", + MinMax: "", + Lt: "0123456789", + Lte: "01234567890", + Gt: "1", + Gte: "1", + OmitEmpty: "12345678901", + Sub: &SubTest{ + Test: "", + }, + Anonymous: struct { + A string `validate:"required"` + }{ + A: "", + }, + Iface: &Impl{ + F: "12", + }, + } + + errs = validate.Struct(tFail) + + // Assert Top Level + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 13) + + // Assert Fields + AssertError(t, errs, "TestString.Required", "Required", "required") + AssertError(t, errs, "TestString.Len", "Len", "len") + AssertError(t, errs, "TestString.Min", "Min", "min") + AssertError(t, errs, "TestString.Max", "Max", "max") + AssertError(t, errs, "TestString.MinMax", "MinMax", "min") + AssertError(t, errs, "TestString.Lt", "Lt", "lt") + AssertError(t, errs, "TestString.Lte", "Lte", "lte") + AssertError(t, errs, "TestString.Gt", "Gt", "gt") + AssertError(t, errs, "TestString.Gte", "Gte", "gte") + AssertError(t, errs, "TestString.OmitEmpty", "OmitEmpty", "max") + + // Nested Struct Field Errs + AssertError(t, errs, "TestString.Anonymous.A", "A", "required") + AssertError(t, errs, "TestString.Sub.Test", "Test", "required") + AssertError(t, errs, "TestString.Iface.F", "F", "len") +} + +func TestStructInt32Validation(t *testing.T) { + + tSuccess := &TestInt32{ + Required: 1, + Len: 10, + Min: 1, + Max: 10, + MinMax: 5, + Lt: 9, + Lte: 10, + Gt: 11, + Gte: 10, + OmitEmpty: 0, + } + + errs := validate.Struct(tSuccess) + Equal(t, errs, nil) + + tFail := &TestInt32{ + Required: 0, + Len: 11, + Min: -1, + Max: 11, + MinMax: -1, + Lt: 10, + Lte: 11, + Gt: 10, + Gte: 9, + OmitEmpty: 11, + } + + errs = validate.Struct(tFail) + + // Assert Top Level + 
NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 10) + + // Assert Fields + AssertError(t, errs, "TestInt32.Required", "Required", "required") + AssertError(t, errs, "TestInt32.Len", "Len", "len") + AssertError(t, errs, "TestInt32.Min", "Min", "min") + AssertError(t, errs, "TestInt32.Max", "Max", "max") + AssertError(t, errs, "TestInt32.MinMax", "MinMax", "min") + AssertError(t, errs, "TestInt32.Lt", "Lt", "lt") + AssertError(t, errs, "TestInt32.Lte", "Lte", "lte") + AssertError(t, errs, "TestInt32.Gt", "Gt", "gt") + AssertError(t, errs, "TestInt32.Gte", "Gte", "gte") + AssertError(t, errs, "TestInt32.OmitEmpty", "OmitEmpty", "max") +} + +func TestStructUint64Validation(t *testing.T) { + + tSuccess := &TestUint64{ + Required: 1, + Len: 10, + Min: 1, + Max: 10, + MinMax: 5, + OmitEmpty: 0, + } + + errs := validate.Struct(tSuccess) + Equal(t, errs, nil) + + tFail := &TestUint64{ + Required: 0, + Len: 11, + Min: 0, + Max: 11, + MinMax: 0, + OmitEmpty: 11, + } + + errs = validate.Struct(tFail) + + // Assert Top Level + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 6) + + // Assert Fields + AssertError(t, errs, "TestUint64.Required", "Required", "required") + AssertError(t, errs, "TestUint64.Len", "Len", "len") + AssertError(t, errs, "TestUint64.Min", "Min", "min") + AssertError(t, errs, "TestUint64.Max", "Max", "max") + AssertError(t, errs, "TestUint64.MinMax", "MinMax", "min") + AssertError(t, errs, "TestUint64.OmitEmpty", "OmitEmpty", "max") +} + +func TestStructFloat64Validation(t *testing.T) { + + tSuccess := &TestFloat64{ + Required: 1, + Len: 10, + Min: 1, + Max: 10, + MinMax: 5, + OmitEmpty: 0, + } + + errs := validate.Struct(tSuccess) + Equal(t, errs, nil) + + tFail := &TestFloat64{ + Required: 0, + Len: 11, + Min: 0, + Max: 11, + MinMax: 0, + OmitEmpty: 11, + } + + errs = validate.Struct(tFail) + + // Assert Top Level + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 6) + + // Assert Fields + AssertError(t, errs, "TestFloat64.Required", "Required", "required") + AssertError(t, errs, "TestFloat64.Len", "Len", "len") + AssertError(t, errs, "TestFloat64.Min", "Min", "min") + AssertError(t, errs, "TestFloat64.Max", "Max", "max") + AssertError(t, errs, "TestFloat64.MinMax", "MinMax", "min") + AssertError(t, errs, "TestFloat64.OmitEmpty", "OmitEmpty", "max") +} + +func TestStructSliceValidation(t *testing.T) { + + tSuccess := &TestSlice{ + Required: []int{1}, + Len: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, + Min: []int{1, 2}, + Max: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}, + MinMax: []int{1, 2, 3, 4, 5}, + OmitEmpty: nil, + } + + errs := validate.Struct(tSuccess) + Equal(t, errs, nil) + + tFail := &TestSlice{ + Required: nil, + Len: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1}, + Min: []int{}, + Max: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1}, + MinMax: []int{}, + OmitEmpty: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1}, + } + + errs = validate.Struct(tFail) + NotEqual(t, errs, nil) + Equal(t, len(errs.(ValidationErrors)), 6) + + // Assert Field Errors + AssertError(t, errs, "TestSlice.Required", "Required", "required") + AssertError(t, errs, "TestSlice.Len", "Len", "len") + AssertError(t, errs, "TestSlice.Min", "Min", "min") + AssertError(t, errs, "TestSlice.Max", "Max", "max") + AssertError(t, errs, "TestSlice.MinMax", "MinMax", "min") + AssertError(t, errs, "TestSlice.OmitEmpty", "OmitEmpty", "max") +} + +func TestInvalidStruct(t *testing.T) { + s := &SubTest{ + Test: "1", + } + + PanicMatches(t, func() { validate.Struct(s.Test) }, "value passed for validation is 
not a struct") +} + +func TestInvalidValidatorFunction(t *testing.T) { + s := &SubTest{ + Test: "1", + } + + PanicMatches(t, func() { validate.Field(s.Test, "zzxxBadFunction") }, "Undefined validation function on field") +} + +func TestCustomFieldName(t *testing.T) { + type A struct { + B string `schema:"b" validate:"required"` + C string `schema:"c" validate:"required"` + D []bool `schema:"d" validate:"required"` + E string `schema:"-" validate:"required"` + } + + a := &A{} + + errs := New(&Config{TagName: "validate", FieldNameTag: "schema"}).Struct(a).(ValidationErrors) + NotEqual(t, errs, nil) + Equal(t, len(errs), 4) + Equal(t, errs["A.B"].Name, "b") + Equal(t, errs["A.C"].Name, "c") + Equal(t, errs["A.D"].Name, "d") + Equal(t, errs["A.E"].Name, "E") + + errs = New(&Config{TagName: "validate"}).Struct(a).(ValidationErrors) + NotEqual(t, errs, nil) + Equal(t, len(errs), 4) + Equal(t, errs["A.B"].Name, "B") + Equal(t, errs["A.C"].Name, "C") + Equal(t, errs["A.D"].Name, "D") + Equal(t, errs["A.E"].Name, "E") +} + +func TestMutipleRecursiveExtractStructCache(t *testing.T) { + + type Recursive struct { + Field *string `validate:"exists,required,len=5,ne=string"` + } + + var test Recursive + + current := reflect.ValueOf(test) + name := "Recursive" + proceed := make(chan struct{}) + + sc := validate.extractStructCache(current, name) + ptr := fmt.Sprintf("%p", sc) + + for i := 0; i < 100; i++ { + + go func() { + <-proceed + sc := validate.extractStructCache(current, name) + Equal(t, ptr, fmt.Sprintf("%p", sc)) + }() + } + + close(proceed) +} + +// Thanks @robbrockbank, see https://github.com/go-playground/validator/issues/249 +func TestPointerAndOmitEmpty(t *testing.T) { + + type Test struct { + MyInt *int `validate:"omitempty,gte=2,lte=255"` + } + + val1 := 0 + val2 := 256 + + t1 := Test{MyInt: &val1} // This should fail validation on gte because value is 0 + t2 := Test{MyInt: &val2} // This should fail validate on lte because value is 256 + t3 := Test{MyInt: nil} // This should succeed validation because pointer is nil + + errs := validate.Struct(t1) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.MyInt", "MyInt", "gte") + + errs = validate.Struct(t2) + NotEqual(t, errs, nil) + AssertError(t, errs, "Test.MyInt", "MyInt", "lte") + + errs = validate.Struct(t3) + Equal(t, errs, nil) + + type TestIface struct { + MyInt interface{} `validate:"omitempty,gte=2,lte=255"` + } + + ti1 := TestIface{MyInt: &val1} // This should fail validation on gte because value is 0 + ti2 := TestIface{MyInt: &val2} // This should fail validate on lte because value is 256 + ti3 := TestIface{MyInt: nil} // This should succeed validation because pointer is nil + + errs = validate.Struct(ti1) + NotEqual(t, errs, nil) + AssertError(t, errs, "TestIface.MyInt", "MyInt", "gte") + + errs = validate.Struct(ti2) + NotEqual(t, errs, nil) + AssertError(t, errs, "TestIface.MyInt", "MyInt", "lte") + + errs = validate.Struct(ti3) + Equal(t, errs, nil) +} diff --git a/wercker.yml b/wercker.yml index bb1f7507c..7c7948754 100644 --- a/wercker.yml +++ b/wercker.yml @@ -18,29 +18,7 @@ build: - script: name: Publish Coveralls code: goveralls -service="wercker.com" -coverprofile=profile.cov -repotoken $COVERALLS_TOKEN - - script: - name: package - code: | - . 
/etc/profile.d/rvm.sh - make package - script: name: pact code: | - ./scripts/pact.sh - - script: - name: Cache artifacts for release - code: | - if [ -d "$WERCKER_CACHE_DIR/dist" ]; then rm -rf ${WERCKER_OUTPUT_DIR}/dist; fi - mkdir ${WERCKER_OUTPUT_DIR}/dist - cp -r dist/*.* ${WERCKER_OUTPUT_DIR}/dist/ - -# TODO: Get tokens for Pact foundation -deploy: - box: golang - steps: - - tcnksm/ghr: - token: $GITHUB_TOKEN - version: $RELEASE_VERSION - input: dist - replace: $RELEASE_IS_DRAFT - pre-release: $RELEASE_IS_DRAFT + ./scripts/pact.sh \ No newline at end of file
