diff --git a/.circleci/config.yml b/.circleci/config.yml index f13908db9bd..7df85bd4d11 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -26,7 +26,7 @@ jobs: - image: circleci/golang:1.14 environment: GOPATH: /home/circleci/.go_workspace - GOLANGCI_VERSION: v1.25.0 + GOLANGCI_VERSION: v1.27.0 working_directory: /home/circleci/.go_workspace/src/github.com/loadimpact/k6 steps: # Workaround for custom env vars not available in cache keys @@ -108,12 +108,10 @@ jobs: working_directory: /home/circleci/.go_workspace/src/github.com/loadimpact/k6 steps: - checkout - - setup_remote_docker: - version: 17.11.0-ce + - setup_remote_docker - run: name: Setup repo and docker command: | - git submodule update --init docker info echo "{\"https://index.docker.io/v1/\":{\"auth\":\"$DOCKER_AUTH\",\"email\":\"$DOCKER_EMAIL\"}}" >~/.dockercfg - run: @@ -138,6 +136,9 @@ jobs: if [ "${CIRCLE_BRANCH}" == "master" ]; then docker tag loadimpact/k6 loadimpact/k6:master docker push loadimpact/k6:master + elif [ "${CIRCLE_BRANCH}" == "new-schedulers" ]; then + docker tag loadimpact/k6 loadimpact/k6:new-executors + docker push loadimpact/k6:new-executors elif [[ "${CIRCLE_TAG}" =~ ^v[0-9]+(\.[0-9]+)*$ ]]; then docker tag loadimpact/k6 loadimpact/k6:${CIRCLE_TAG:1} docker push loadimpact/k6:latest diff --git a/.golangci.yml b/.golangci.yml index 9c184fa5fb9..32b7895c413 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -57,4 +57,6 @@ linters: - gomodguard - testpackage - wsl + - gomnd + - goerr113 # most of the errors here are meant for humans fast: false diff --git a/Dockerfile b/Dockerfile index 89d250b7221..4b548b7bd2b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,10 +1,10 @@ -FROM golang:1.13-alpine as builder +FROM golang:1.14-alpine as builder WORKDIR $GOPATH/src/github.com/loadimpact/k6 ADD . . 
RUN apk --no-cache add git RUN CGO_ENABLED=0 go install -a -trimpath -ldflags "-s -w -X github.com/loadimpact/k6/lib/consts.VersionDetails=$(date -u +"%FT%T%z")/$(git describe --always --long --dirty)" -FROM alpine:3.10 +FROM alpine:3.11 RUN apk add --no-cache ca-certificates && \ adduser -D -u 12345 -g 12345 k6 COPY --from=builder /go/bin/k6 /usr/bin/k6 diff --git a/Gopkg.lock b/Gopkg.lock index 2243c01a8ac..e90fa5a83fc 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -97,7 +97,7 @@ [[projects]] branch = "master" - digest = "1:fda79eedf88d4aedeccb193dfeae71522bd806f5fc07ae70f05187cbbd4b1146" + digest = "1:dd49b520ab3ef9766f486a5771c1aef117e77b4d09bd8e2690df02cfc8e607b9" name = "github.com/dop251/goja" packages = [ ".", @@ -107,7 +107,7 @@ "token", ] pruneopts = "NUT" - revision = "0cd29d81c048b6c79afb6ba6a1305bcb70c28740" + revision = "bfd59704b500581bf75771f79cc2579741cf3d2c" [[projects]] branch = "master" @@ -240,12 +240,12 @@ version = "v1.6.1" [[projects]] - digest = "1:3b708ebf63bfa9ba3313bedb8526bc0bb284e51474e65e958481476a9d4a12aa" + digest = "1:c9c619da0235710875bae079870a396912a475402f9ccd50667f6ad309476259" name = "github.com/gorilla/websocket" packages = ["."] pruneopts = "NUT" - revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b" - version = "v1.2.0" + revision = "b65e62901fc1c0d968042419e74789f6af455eb9" + version = "v1.4.2" [[projects]] digest = "1:406338ad39ab2e37b7f4452906442a3dbf0eb3379dd1f06aafb5c07e769a5fbb" diff --git a/SUPPORT.md b/SUPPORT.md index 12c068d7c7e..2cc8c52d186 100644 --- a/SUPPORT.md +++ b/SUPPORT.md @@ -9,4 +9,4 @@ Types of questions and where to ask: - Why do you? -- [community.k6.io](https://community.k6.io/) or [Slack](https://k6.io/slack) - When will you? 
-- [community.k6.io](https://community.k6.io/) or [Slack](https://k6.io/slack) -If your questions are about any of the commercial Load Impact services like managed cloud execution and Load Impact Insights, you can contact or write in the `#loadimpact` channel in [Slack](https://k6.io/slack). +If your questions are about any of the commercial Load Impact services like managed cloud execution and k6 cloud output, you can contact or write in the `#loadimpact` channel in [Slack](https://k6.io/slack). diff --git a/api/server_test.go b/api/server_test.go index c634af19183..27299274b88 100644 --- a/api/server_test.go +++ b/api/server_test.go @@ -29,11 +29,14 @@ import ( "github.com/sirupsen/logrus" logtest "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/urfave/negroni" "github.com/loadimpact/k6/api/common" "github.com/loadimpact/k6/core" + "github.com/loadimpact/k6/core/local" "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/testutils/minirunner" ) func testHTTPHandler(rw http.ResponseWriter, r *http.Request) { @@ -74,10 +77,10 @@ func TestLogger(t *testing.T) { } func TestWithEngine(t *testing.T) { - engine, err := core.NewEngine(nil, lib.Options{}) - if !assert.NoError(t, err) { - return - } + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, logrus.StandardLogger()) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, lib.Options{}, logrus.StandardLogger()) + require.NoError(t, err) rw := httptest.NewRecorder() r := httptest.NewRequest("GET", "http://example.com/", nil) diff --git a/api/v1/client/client.go b/api/v1/client/client.go index 0ef1b3bbe6f..dbeef87c541 100644 --- a/api/v1/client/client.go +++ b/api/v1/client/client.go @@ -30,28 +30,79 @@ import ( "net/url" "github.com/manyminds/api2go/jsonapi" + "github.com/sirupsen/logrus" "github.com/loadimpact/k6/api/v1" ) +// Client is a simple HTTP client for the REST API. 
type Client struct { - BaseURL *url.URL + BaseURL *url.URL + httpClient *http.Client + logger *logrus.Entry } -func New(base string) (*Client, error) { +// Option functions are helpers that enable the flexible configuration of the +// REST API client. +type Option func(*Client) + +// New returns a newly configured REST API Client. +func New(base string, options ...Option) (*Client, error) { baseURL, err := url.Parse("http://" + base) if err != nil { return nil, err } - return &Client{BaseURL: baseURL}, nil + c := &Client{ + BaseURL: baseURL, + httpClient: http.DefaultClient, + } + + for _, option := range options { + option(c) + } + + return c, nil +} + +// WithHTTPClient configures the supplied HTTP client to be used when making +// REST API requests. +func WithHTTPClient(httpClient *http.Client) Option { + return Option(func(c *Client) { + c.httpClient = httpClient + }) } -func (c *Client) call(ctx context.Context, method string, rel *url.URL, body, out interface{}) error { +// WithLogger sets the specified logger to the client. +func WithLogger(logger *logrus.Entry) Option { + return Option(func(c *Client) { + c.logger = logger + }) +} + +// Call executes the desired REST API request. 
+func (c *Client) Call(ctx context.Context, method string, rel *url.URL, body, out interface{}) (err error) { + if c.logger != nil { + c.logger.Debugf("[REST API] Making a %s request to '%s'", method, rel.String()) + defer func() { + if err != nil { + c.logger.WithError(err).Error("[REST API] Error") + } + }() + } + var bodyReader io.ReadCloser if body != nil { - bodyData, err := jsonapi.Marshal(body) - if err != nil { - return err + var bodyData []byte + switch val := body.(type) { + case []byte: + bodyData = val + case string: + bodyData = []byte(val) + default: + bodyData, err = jsonapi.Marshal(body) + if err != nil { + return err + } } bodyReader = ioutil.NopCloser(bytes.NewBuffer(bodyData)) } @@ -63,7 +114,7 @@ func (c *Client) call(ctx context.Context, method string, rel *url.URL, body, ou } req = req.WithContext(ctx) - res, err := http.DefaultClient.Do(req) + res, err := c.httpClient.Do(req) if err != nil { return err } @@ -82,5 +133,8 @@ func (c *Client) call(ctx context.Context, method string, rel *url.URL, body, ou return errs.Errors[0] } - return jsonapi.Unmarshal(data, out) + if out != nil { + return jsonapi.Unmarshal(data, out) + } + return nil } diff --git a/api/v1/client/metrics.go b/api/v1/client/metrics.go index 8d3753ef520..8666d43ffb5 100644 --- a/api/v1/client/metrics.go +++ b/api/v1/client/metrics.go @@ -24,11 +24,10 @@ import ( "context" "net/url" - "github.com/loadimpact/k6/api/v1" + v1 "github.com/loadimpact/k6/api/v1" ) -var MetricsURL = &url.URL{Path: "/v1/metrics"} - +// Metrics returns the current metrics summary. 
func (c *Client) Metrics(ctx context.Context) (ret []v1.Metric, err error) { - return ret, c.call(ctx, "GET", MetricsURL, nil, &ret) + return ret, c.Call(ctx, "GET", &url.URL{Path: "/v1/metrics"}, nil, &ret) } diff --git a/api/v1/client/status.go b/api/v1/client/status.go index 70cd23f35f6..a49aabd0c61 100644 --- a/api/v1/client/status.go +++ b/api/v1/client/status.go @@ -24,15 +24,16 @@ import ( "context" "net/url" - "github.com/loadimpact/k6/api/v1" + v1 "github.com/loadimpact/k6/api/v1" ) -var StatusURL = &url.URL{Path: "/v1/status"} - +// Status returns the current k6 status. func (c *Client) Status(ctx context.Context) (ret v1.Status, err error) { - return ret, c.call(ctx, "GET", StatusURL, nil, &ret) + return ret, c.Call(ctx, "GET", &url.URL{Path: "/v1/status"}, nil, &ret) } +// SetStatus tries to change the current status and returns the new one if it +// was successful. func (c *Client) SetStatus(ctx context.Context, patch v1.Status) (ret v1.Status, err error) { - return ret, c.call(ctx, "PATCH", StatusURL, patch, &ret) + return ret, c.Call(ctx, "PATCH", &url.URL{Path: "/v1/status"}, patch, &ret) } diff --git a/api/v1/group.go b/api/v1/group.go index 429c5f76cdc..abc2c4dea25 100644 --- a/api/v1/group.go +++ b/api/v1/group.go @@ -21,9 +21,10 @@ package v1 import ( - "github.com/loadimpact/k6/lib" "github.com/manyminds/api2go/jsonapi" "github.com/pkg/errors" + + "github.com/loadimpact/k6/lib" ) type Check struct { diff --git a/api/v1/group_routes.go b/api/v1/group_routes.go index ead29a3bb36..375ea2b8f34 100644 --- a/api/v1/group_routes.go +++ b/api/v1/group_routes.go @@ -24,14 +24,15 @@ import ( "net/http" "github.com/julienschmidt/httprouter" - "github.com/loadimpact/k6/api/common" "github.com/manyminds/api2go/jsonapi" + + "github.com/loadimpact/k6/api/common" ) func HandleGetGroups(rw http.ResponseWriter, r *http.Request, p httprouter.Params) { engine := common.GetEngine(r.Context()) - root := NewGroup(engine.Executor.GetRunner().GetDefaultGroup(), nil) + 
root := NewGroup(engine.ExecutionScheduler.GetRunner().GetDefaultGroup(), nil) groups := FlattenGroup(root) data, err := jsonapi.Marshal(groups) @@ -47,7 +48,7 @@ func HandleGetGroup(rw http.ResponseWriter, r *http.Request, p httprouter.Params engine := common.GetEngine(r.Context()) - root := NewGroup(engine.Executor.GetRunner().GetDefaultGroup(), nil) + root := NewGroup(engine.ExecutionScheduler.GetRunner().GetDefaultGroup(), nil) groups := FlattenGroup(root) var group *Group diff --git a/api/v1/group_routes_test.go b/api/v1/group_routes_test.go index 17af242676f..94894e863a3 100644 --- a/api/v1/group_routes_test.go +++ b/api/v1/group_routes_test.go @@ -26,11 +26,15 @@ import ( "net/http/httptest" "testing" + "github.com/manyminds/api2go/jsonapi" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/loadimpact/k6/core" "github.com/loadimpact/k6/core/local" "github.com/loadimpact/k6/lib" - "github.com/manyminds/api2go/jsonapi" - "github.com/stretchr/testify/assert" + "github.com/loadimpact/k6/lib/testutils/minirunner" ) func TestGetGroups(t *testing.T) { @@ -41,8 +45,10 @@ func TestGetGroups(t *testing.T) { g2, err := g1.Group("group 2") assert.NoError(t, err) - engine, err := core.NewEngine(local.New(&lib.MiniRunner{Group: g0}), lib.Options{}) - assert.NoError(t, err) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Group: g0}, logrus.StandardLogger()) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, lib.Options{}, logrus.StandardLogger()) + require.NoError(t, err) t.Run("list", func(t *testing.T) { rw := httptest.NewRecorder() @@ -61,34 +67,33 @@ func TestGetGroups(t *testing.T) { } }) - // t.Run("groups", func(t *testing.T) { - // var groups []Group - // assert.NoError(t, jsonapi.Unmarshal(body, &groups)) - // if assert.Len(t, groups, 3) { - // for _, g := range groups { - // switch g.ID { - // case g0.ID: - // assert.Equal(t, "", g.Name) - // 
assert.Nil(t, g.Parent) - // assert.Equal(t, "", g.ParentID) - // assert.Len(t, g.GroupIDs, 1) - // assert.EqualValues(t, []string{g1.ID}, g.GroupIDs) - // case g1.ID: - // assert.Equal(t, "group 1", g.Name) - // assert.Nil(t, g.Parent) - // assert.Equal(t, g0.ID, g.ParentID) - // assert.EqualValues(t, []string{g2.ID}, g.GroupIDs) - // case g2.ID: - // assert.Equal(t, "group 2", g.Name) - // assert.Nil(t, g.Parent) - // assert.Equal(t, g1.ID, g.ParentID) - // assert.EqualValues(t, []string{}, g.GroupIDs) - // default: - // assert.Fail(t, "Unknown ID: "+g.ID) - // } - // } - // } - // }) + t.Run("groups", func(t *testing.T) { + var groups []Group + require.NoError(t, jsonapi.Unmarshal(body, &groups)) + require.Len(t, groups, 3) + for _, g := range groups { + switch g.ID { + case g0.ID: + assert.Equal(t, "", g.Name) + assert.Nil(t, g.Parent) + assert.Equal(t, "", g.ParentID) + assert.Len(t, g.GroupIDs, 1) + assert.EqualValues(t, []string{g1.ID}, g.GroupIDs) + case g1.ID: + assert.Equal(t, "group 1", g.Name) + assert.Nil(t, g.Parent) + assert.Equal(t, g0.ID, g.ParentID) + assert.EqualValues(t, []string{g2.ID}, g.GroupIDs) + case g2.ID: + assert.Equal(t, "group 2", g.Name) + assert.Nil(t, g.Parent) + assert.Equal(t, g1.ID, g.ParentID) + assert.EqualValues(t, []string{}, g.GroupIDs) + default: + assert.Fail(t, "Unknown ID: "+g.ID) + } + } + }) }) for _, gp := range []*lib.Group{g0, g1, g2} { t.Run(gp.Name, func(t *testing.T) { diff --git a/api/v1/group_test.go b/api/v1/group_test.go index ed767e422fd..695fff47541 100644 --- a/api/v1/group_test.go +++ b/api/v1/group_test.go @@ -23,8 +23,9 @@ package v1 import ( "testing" - "github.com/loadimpact/k6/lib" "github.com/stretchr/testify/assert" + + "github.com/loadimpact/k6/lib" ) func TestNewCheck(t *testing.T) { diff --git a/api/v1/metric.go b/api/v1/metric.go index c71c7e8930b..c1950a898ce 100644 --- a/api/v1/metric.go +++ b/api/v1/metric.go @@ -25,8 +25,9 @@ import ( "encoding/json" "time" - 
"github.com/loadimpact/k6/stats" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/stats" ) type NullMetricType struct { diff --git a/api/v1/metric_routes.go b/api/v1/metric_routes.go index 08016c9dda7..f2f43eb8e46 100644 --- a/api/v1/metric_routes.go +++ b/api/v1/metric_routes.go @@ -25,16 +25,17 @@ import ( "time" "github.com/julienschmidt/httprouter" - "github.com/loadimpact/k6/api/common" "github.com/manyminds/api2go/jsonapi" + + "github.com/loadimpact/k6/api/common" ) func HandleGetMetrics(rw http.ResponseWriter, r *http.Request, p httprouter.Params) { engine := common.GetEngine(r.Context()) var t time.Duration - if engine.Executor != nil { - t = engine.Executor.GetTime() + if engine.ExecutionScheduler != nil { + t = engine.ExecutionScheduler.GetState().GetCurrentTestRunDuration() } metrics := make([]Metric, 0) @@ -55,8 +56,8 @@ func HandleGetMetric(rw http.ResponseWriter, r *http.Request, p httprouter.Param engine := common.GetEngine(r.Context()) var t time.Duration - if engine.Executor != nil { - t = engine.Executor.GetTime() + if engine.ExecutionScheduler != nil { + t = engine.ExecutionScheduler.GetState().GetCurrentTestRunDuration() } var metric Metric diff --git a/api/v1/metric_routes_test.go b/api/v1/metric_routes_test.go index 265871fd9c6..8fddda1491b 100644 --- a/api/v1/metric_routes_test.go +++ b/api/v1/metric_routes_test.go @@ -26,17 +26,24 @@ import ( "net/http/httptest" "testing" + "github.com/manyminds/api2go/jsonapi" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/core" + "github.com/loadimpact/k6/core/local" "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/testutils/minirunner" "github.com/loadimpact/k6/stats" - "github.com/manyminds/api2go/jsonapi" - "github.com/stretchr/testify/assert" - null "gopkg.in/guregu/null.v3" ) func TestGetMetrics(t *testing.T) { - engine, err := core.NewEngine(nil, lib.Options{}) - 
assert.NoError(t, err) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, logrus.StandardLogger()) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, lib.Options{}, logrus.StandardLogger()) + require.NoError(t, err) engine.Metrics = map[string]*stats.Metric{ "my_metric": stats.New("my_metric", stats.Trend, stats.Time), @@ -74,8 +81,10 @@ func TestGetMetrics(t *testing.T) { } func TestGetMetric(t *testing.T) { - engine, err := core.NewEngine(nil, lib.Options{}) - assert.NoError(t, err) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, logrus.StandardLogger()) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, lib.Options{}, logrus.StandardLogger()) + require.NoError(t, err) engine.Metrics = map[string]*stats.Metric{ "my_metric": stats.New("my_metric", stats.Trend, stats.Time), diff --git a/api/v1/metric_test.go b/api/v1/metric_test.go index 18fbfdeebc7..78f3614bf36 100644 --- a/api/v1/metric_test.go +++ b/api/v1/metric_test.go @@ -24,9 +24,10 @@ import ( "encoding/json" "testing" - "github.com/loadimpact/k6/stats" "github.com/stretchr/testify/assert" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/stats" ) func TestNullMetricTypeJSON(t *testing.T) { diff --git a/api/v1/routes_test.go b/api/v1/routes_test.go index e289a9916d6..31317d90805 100644 --- a/api/v1/routes_test.go +++ b/api/v1/routes_test.go @@ -26,9 +26,10 @@ import ( "net/http/httptest" "testing" + "github.com/stretchr/testify/assert" + "github.com/loadimpact/k6/api/common" "github.com/loadimpact/k6/core" - "github.com/stretchr/testify/assert" ) func newRequestWithEngine(engine *core.Engine, method, target string, body io.Reader) *http.Request { diff --git a/api/v1/setup_teardown_routes.go b/api/v1/setup_teardown_routes.go index 1ed62a27697..7a00bb84c7d 100644 --- a/api/v1/setup_teardown_routes.go +++ b/api/v1/setup_teardown_routes.go @@ -26,8 +26,9 @@ import ( 
"net/http" "github.com/julienschmidt/httprouter" - "github.com/loadimpact/k6/api/common" "github.com/manyminds/api2go/jsonapi" + + "github.com/loadimpact/k6/api/common" ) // NullSetupData is wrapper around null to satisfy jsonapi @@ -71,7 +72,7 @@ func handleSetupDataOutput(rw http.ResponseWriter, setupData json.RawMessage) { // HandleGetSetupData just returns the current JSON-encoded setup data func HandleGetSetupData(rw http.ResponseWriter, r *http.Request, p httprouter.Params) { - runner := common.GetEngine(r.Context()).Executor.GetRunner() + runner := common.GetEngine(r.Context()).ExecutionScheduler.GetRunner() handleSetupDataOutput(rw, runner.GetSetupData()) } @@ -91,7 +92,7 @@ func HandleSetSetupData(rw http.ResponseWriter, r *http.Request, p httprouter.Pa } } - runner := common.GetEngine(r.Context()).Executor.GetRunner() + runner := common.GetEngine(r.Context()).ExecutionScheduler.GetRunner() if len(body) == 0 { runner.SetSetupData(nil) @@ -105,7 +106,7 @@ func HandleSetSetupData(rw http.ResponseWriter, r *http.Request, p httprouter.Pa // HandleRunSetup executes the runner's Setup() method and returns the result func HandleRunSetup(rw http.ResponseWriter, r *http.Request, p httprouter.Params) { engine := common.GetEngine(r.Context()) - runner := engine.Executor.GetRunner() + runner := engine.ExecutionScheduler.GetRunner() if err := runner.Setup(r.Context(), engine.Samples); err != nil { apiError(rw, "Error executing setup", err.Error(), http.StatusInternalServerError) @@ -118,7 +119,7 @@ func HandleRunSetup(rw http.ResponseWriter, r *http.Request, p httprouter.Params // HandleRunTeardown executes the runner's Teardown() method func HandleRunTeardown(rw http.ResponseWriter, r *http.Request, p httprouter.Params) { engine := common.GetEngine(r.Context()) - runner := common.GetEngine(r.Context()).Executor.GetRunner() + runner := common.GetEngine(r.Context()).ExecutionScheduler.GetRunner() if err := runner.Teardown(r.Context(), engine.Samples); err != nil { 
apiError(rw, "Error executing teardown", err.Error(), http.StatusInternalServerError) diff --git a/api/v1/setup_teardown_routes_test.go b/api/v1/setup_teardown_routes_test.go index 14476da7122..ab7e504110d 100644 --- a/api/v1/setup_teardown_routes_test.go +++ b/api/v1/setup_teardown_routes_test.go @@ -30,21 +30,23 @@ import ( "testing" "time" + "github.com/manyminds/api2go/jsonapi" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/core" "github.com/loadimpact/k6/core/local" "github.com/loadimpact/k6/js" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/loader" - "github.com/manyminds/api2go/jsonapi" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" ) func TestSetupData(t *testing.T) { t.Parallel() - var testCases = []struct { + testCases := []struct { name string script []byte setupRuns [][3]string @@ -142,19 +144,26 @@ func TestSetupData(t *testing.T) { runner.SetOptions(lib.Options{ Paused: null.BoolFrom(true), VUs: null.IntFrom(2), - VUsMax: null.IntFrom(2), Iterations: null.IntFrom(3), - SetupTimeout: types.NullDurationFrom(1 * time.Second), - TeardownTimeout: types.NullDurationFrom(1 * time.Second), + NoSetup: null.BoolFrom(true), + SetupTimeout: types.NullDurationFrom(5 * time.Second), + TeardownTimeout: types.NullDurationFrom(5 * time.Second), }) - executor := local.New(runner) - executor.SetRunSetup(false) - engine, err := core.NewEngine(executor, runner.GetOptions()) + execScheduler, err := local.NewExecutionScheduler(runner, logrus.StandardLogger()) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, runner.GetOptions(), logrus.StandardLogger()) + require.NoError(t, err) + + globalCtx, globalCancel := context.WithCancel(context.Background()) + runCtx, runCancel := context.WithCancel(globalCtx) + run, wait, err := 
engine.Init(globalCtx, runCtx) + defer wait() + defer globalCancel() + require.NoError(t, err) - ctx, cancel := context.WithCancel(context.Background()) errC := make(chan error) - go func() { errC <- engine.Run(ctx) }() + go func() { errC <- run() }() handler := NewHandler() @@ -179,14 +188,14 @@ func TestSetupData(t *testing.T) { checkSetup(setupRun[0], setupRun[1], setupRun[2]) } - engine.Executor.SetPaused(false) + require.NoError(t, engine.ExecutionScheduler.SetPaused(false)) select { case <-time.After(10 * time.Second): - cancel() + runCancel() t.Fatal("Test timed out") case err := <-errC: - cancel() + runCancel() require.NoError(t, err) } }) diff --git a/api/v1/status.go b/api/v1/status.go index 3472e25cf1d..e23934df888 100644 --- a/api/v1/status.go +++ b/api/v1/status.go @@ -21,26 +21,32 @@ package v1 import ( - "github.com/loadimpact/k6/core" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/core" + "github.com/loadimpact/k6/lib" ) type Status struct { - Paused null.Bool `json:"paused" yaml:"paused"` - VUs null.Int `json:"vus" yaml:"vus"` - VUsMax null.Int `json:"vus-max" yaml:"vus-max"` + Status lib.ExecutionStatus `json:"status" yaml:"status"` - // Readonly. 
- Running bool `json:"running" yaml:"running"` - Tainted bool `json:"tainted" yaml:"tainted"` + Paused null.Bool `json:"paused" yaml:"paused"` + VUs null.Int `json:"vus" yaml:"vus"` + VUsMax null.Int `json:"vus-max" yaml:"vus-max"` + Stopped bool `json:"stopped" yaml:"stopped"` + Running bool `json:"running" yaml:"running"` + Tainted bool `json:"tainted" yaml:"tainted"` } func NewStatus(engine *core.Engine) Status { + executionState := engine.ExecutionScheduler.GetState() return Status{ - Paused: null.BoolFrom(engine.Executor.IsPaused()), - VUs: null.IntFrom(engine.Executor.GetVUs()), - VUsMax: null.IntFrom(engine.Executor.GetVUsMax()), - Running: engine.Executor.IsRunning(), + Status: executionState.GetCurrentExecutionStatus(), + Running: executionState.HasStarted() && !executionState.HasEnded(), + Paused: null.BoolFrom(executionState.IsPaused()), + Stopped: engine.IsStopped(), + VUs: null.IntFrom(executionState.GetCurrentlyActiveVUsCount()), + VUsMax: null.IntFrom(executionState.GetInitializedVUsCount()), Tainted: engine.IsTainted(), } } diff --git a/api/v1/status_routes.go b/api/v1/status_routes.go index 5205d8ad7c1..ee9a62e9e66 100644 --- a/api/v1/status_routes.go +++ b/api/v1/status_routes.go @@ -21,12 +21,16 @@ package v1 import ( + "errors" "io/ioutil" "net/http" "github.com/julienschmidt/httprouter" - "github.com/loadimpact/k6/api/common" "github.com/manyminds/api2go/jsonapi" + + "github.com/loadimpact/k6/api/common" + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/executor" ) func HandleGetStatus(rw http.ResponseWriter, r *http.Request, p httprouter.Params) { @@ -41,6 +45,18 @@ func HandleGetStatus(rw http.ResponseWriter, r *http.Request, p httprouter.Param _, _ = rw.Write(data) } +func getFirstExternallyControlledExecutor( + execScheduler lib.ExecutionScheduler, +) (*executor.ExternallyControlled, error) { + executors := execScheduler.GetExecutors() + for _, s := range executors { + if mex, ok := s.(*executor.ExternallyControlled); ok { + 
return mex, nil + } + } + return nil, errors.New("an externally-controlled executor needs to be configured for live configuration updates") +} + func HandlePatchStatus(rw http.ResponseWriter, r *http.Request, p httprouter.Params) { engine := common.GetEngine(r.Context()) @@ -56,21 +72,38 @@ func HandlePatchStatus(rw http.ResponseWriter, r *http.Request, p httprouter.Par return } - if status.VUsMax.Valid { - if err := engine.Executor.SetVUsMax(status.VUsMax.Int64); err != nil { - apiError(rw, "Couldn't change cap", err.Error(), http.StatusBadRequest) - return + if status.Stopped { //nolint:nestif + engine.Stop() + } else { + if status.Paused.Valid { + if err = engine.ExecutionScheduler.SetPaused(status.Paused.Bool); err != nil { + apiError(rw, "Pause error", err.Error(), http.StatusInternalServerError) + return + } } - } - if status.VUs.Valid { - if err := engine.Executor.SetVUs(status.VUs.Int64); err != nil { - apiError(rw, "Couldn't scale", err.Error(), http.StatusBadRequest) - return + + if status.VUsMax.Valid || status.VUs.Valid { + //TODO: add ability to specify the actual executor id? Though this should + //likely be in the v2 REST API, where we could implement it in a way that + //may allow us to eventually support other executor types. 
+ executor, updateErr := getFirstExternallyControlledExecutor(engine.ExecutionScheduler) + if updateErr != nil { + apiError(rw, "Execution config error", updateErr.Error(), http.StatusInternalServerError) + return + } + newConfig := executor.GetCurrentConfig().ExternallyControlledConfigParams + if status.VUsMax.Valid { + newConfig.MaxVUs = status.VUsMax + } + if status.VUs.Valid { + newConfig.VUs = status.VUs + } + if updateErr := executor.UpdateConfig(r.Context(), newConfig); updateErr != nil { + apiError(rw, "Config update error", updateErr.Error(), http.StatusBadRequest) + return + } } } - if status.Paused.Valid { - engine.Executor.SetPaused(status.Paused.Bool) - } data, err := jsonapi.Marshal(NewStatus(engine)) if err != nil { diff --git a/api/v1/status_routes_test.go b/api/v1/status_routes_test.go index 8e33d0a85df..79e1bb5478a 100644 --- a/api/v1/status_routes_test.go +++ b/api/v1/status_routes_test.go @@ -22,21 +22,30 @@ package v1 import ( "bytes" + "context" "encoding/json" "net/http" "net/http/httptest" "testing" + "time" - "github.com/loadimpact/k6/core" - "github.com/loadimpact/k6/lib" "github.com/manyminds/api2go/jsonapi" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/core" + "github.com/loadimpact/k6/core/local" + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/testutils/minirunner" ) func TestGetStatus(t *testing.T) { - engine, err := core.NewEngine(nil, lib.Options{}) - assert.NoError(t, err) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{}, logrus.StandardLogger()) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, lib.Options{}, logrus.StandardLogger()) + require.NoError(t, err) rw := httptest.NewRecorder() NewHandler().ServeHTTP(rw, newRequestWithEngine(engine, "GET", "/v1/status", nil)) @@ -58,6 +67,7 @@ func TestGetStatus(t *testing.T) { assert.True(t, 
status.Paused.Valid) assert.True(t, status.VUs.Valid) assert.True(t, status.VUsMax.Valid) + assert.False(t, status.Stopped) assert.False(t, status.Tainted) }) } @@ -67,17 +77,35 @@ func TestPatchStatus(t *testing.T) { StatusCode int Status Status }{ - "nothing": {200, Status{}}, - "paused": {200, Status{Paused: null.BoolFrom(true)}}, - "max vus": {200, Status{VUsMax: null.IntFrom(10)}}, - "too many vus": {400, Status{VUs: null.IntFrom(10), VUsMax: null.IntFrom(0)}}, - "vus": {200, Status{VUs: null.IntFrom(10), VUsMax: null.IntFrom(10)}}, + "nothing": {200, Status{}}, + "paused": {200, Status{Paused: null.BoolFrom(true)}}, + "max vus": {200, Status{VUsMax: null.IntFrom(20)}}, + "max vus below initial": {400, Status{VUsMax: null.IntFrom(5)}}, + "too many vus": {400, Status{VUs: null.IntFrom(10), VUsMax: null.IntFrom(0)}}, + "vus": {200, Status{VUs: null.IntFrom(10), VUsMax: null.IntFrom(10)}}, } + scenarios := lib.ScenarioConfigs{} + err := json.Unmarshal([]byte(` + {"external": {"executor": "externally-controlled", + "vus": 0, "maxVUs": 10, "duration": "1s"}}`), &scenarios) + require.NoError(t, err) + options := lib.Options{Scenarios: scenarios} + for name, indata := range testdata { t.Run(name, func(t *testing.T) { - engine, err := core.NewEngine(nil, lib.Options{}) - assert.NoError(t, err) + execScheduler, err := local.NewExecutionScheduler(&minirunner.MiniRunner{Options: options}, logrus.StandardLogger()) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, options, logrus.StandardLogger()) + require.NoError(t, err) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + run, _, err := engine.Init(ctx, ctx) + require.NoError(t, err) + + go func() { _ = run() }() + // wait for the executor to initialize to avoid a potential data race below + time.Sleep(100 * time.Millisecond) body, err := jsonapi.Marshal(indata.Status) if !assert.NoError(t, err) { diff --git a/appveyor.yml b/appveyor.yml index 
721a7e09f0d..56a9b3adc25 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -10,7 +10,7 @@ environment: # specific to go VERSION: "%APPVEYOR_REPO_TAG_NAME:v=%" GOPATH: c:\gopath - GOVERSION: 1.14 + GOVERSION: 1.14.4 GOMAXPROCS: 2 CGO_ENABLED: '0' GOARCH: amd64 @@ -60,7 +60,7 @@ build_script: test_script: - cd %APPVEYOR_BUILD_FOLDER% - go version - - go test ./... + - go test -p 1 ./... deploy_script: - cd %APPVEYOR_BUILD_FOLDER%\packaging diff --git a/cmd/archive.go b/cmd/archive.go index 0af47b7f597..9657e054bd8 100644 --- a/cmd/archive.go +++ b/cmd/archive.go @@ -23,10 +23,11 @@ package cmd import ( "os" - "github.com/loadimpact/k6/loader" "github.com/spf13/afero" "github.com/spf13/cobra" "github.com/spf13/pflag" + + "github.com/loadimpact/k6/loader" ) var archiveOut = "archive.tar" @@ -58,7 +59,7 @@ An archive is a fully self-contained test run, and can be executed identically e return err } - runtimeOptions, err := getRuntimeOptions(cmd.Flags()) + runtimeOptions, err := getRuntimeOptions(cmd.Flags(), buildEnvMap(os.Environ())) if err != nil { return err } @@ -77,7 +78,7 @@ An archive is a fully self-contained test run, and can be executed identically e return err } - if _, cerr := deriveAndValidateConfig(conf); cerr != nil { + if _, cerr := deriveAndValidateConfig(conf, r.IsExecutable); cerr != nil { return ExitCode{error: cerr, Code: invalidConfigErrorCode} } diff --git a/cmd/cloud.go b/cmd/cloud.go index 8b45ae6d03e..a0f1eb2bd3c 100644 --- a/cmd/cloud.go +++ b/cmd/cloud.go @@ -23,6 +23,7 @@ package cmd import ( "bytes" "encoding/json" + "fmt" "os" "os/signal" "path/filepath" @@ -31,6 +32,7 @@ import ( "github.com/kelseyhightower/envconfig" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/spf13/afero" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -40,8 +42,7 @@ import ( "github.com/loadimpact/k6/loader" "github.com/loadimpact/k6/stats/cloud" "github.com/loadimpact/k6/ui" - - "github.com/sirupsen/logrus" + "github.com/loadimpact/k6/ui/pb" 
) const ( @@ -65,11 +66,12 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud RunE: func(cmd *cobra.Command, args []string) error { //TODO: disable in quiet mode? _, _ = BannerColor.Fprintf(stdout, "\n%s\n\n", consts.Banner) - initBar := ui.ProgressBar{ - Width: 60, - Left: func() string { return " uploading script" }, - } - fprintf(stdout, "%s \r", initBar.String()) + + progressBar := pb.New( + pb.WithConstLeft(" Init"), + pb.WithConstProgress(0, "Parsing script"), + ) + printBar(progressBar) // Runner pwd, err := os.Getwd() @@ -84,16 +86,18 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud return err } - runtimeOptions, err := getRuntimeOptions(cmd.Flags()) + runtimeOptions, err := getRuntimeOptions(cmd.Flags(), buildEnvMap(os.Environ())) if err != nil { return err } + modifyAndPrintBar(progressBar, pb.WithConstProgress(0, "Getting script options")) r, err := newRunner(src, runType, filesystems, runtimeOptions) if err != nil { return err } + modifyAndPrintBar(progressBar, pb.WithConstProgress(0, "Consolidating options")) cliOpts, err := getOptions(cmd.Flags()) if err != nil { return err @@ -103,11 +107,15 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud return err } - derivedConf, cerr := deriveAndValidateConfig(conf) + derivedConf, cerr := deriveAndValidateConfig(conf, r.IsExecutable) if cerr != nil { return ExitCode{error: cerr, Code: invalidConfigErrorCode} } + //TODO: validate for usage of execution segment + //TODO: validate for externally controlled executor (i.e. executors that aren't distributable) + //TODO: move those validations to a separate function and reuse validateConfig()? + err = r.SetOptions(conf.Options) if err != nil { return err @@ -122,6 +130,7 @@ This will execute the test on the Load Impact cloud service. 
Use "k6 login cloud return errors.New("Not logged in, please use `k6 login cloud`.") } + modifyAndPrintBar(progressBar, pb.WithConstProgress(0, "Building the archive")) arc := r.MakeArchive() // TODO: Fix this // We reuse cloud.Config for parsing options.ext.loadimpact, but this probably shouldn't be @@ -170,22 +179,31 @@ This will execute the test on the Load Impact cloud service. Use "k6 login cloud } // Start cloud test run + modifyAndPrintBar(progressBar, pb.WithConstProgress(0, "Validating script options")) client := cloud.NewClient(cloudConfig.Token.String, cloudConfig.Host.String, consts.Version) if err := client.ValidateOptions(arc.Options); err != nil { return err } + modifyAndPrintBar(progressBar, pb.WithConstProgress(0, "Uploading archive")) refID, err := client.StartCloudTestRun(name, cloudConfig.ProjectID.Int64, arc) if err != nil { return err } + et, err := lib.NewExecutionTuple(derivedConf.ExecutionSegment, derivedConf.ExecutionSegmentSequence) + if err != nil { + return err + } testURL := cloud.URLForResults(refID, cloudConfig) - fprintf(stdout, "\n\n") - fprintf(stdout, " execution: %s\n", ui.ValueColor.Sprint("cloud")) - fprintf(stdout, " script: %s\n", ui.ValueColor.Sprint(filename)) - fprintf(stdout, " output: %s\n", ui.ValueColor.Sprint(testURL)) - fprintf(stdout, "\n") + executionPlan := derivedConf.Scenarios.GetFullExecutionRequirements(et) + printExecutionDescription("cloud", filename, testURL, derivedConf, et, executionPlan, nil) + + modifyAndPrintBar( + progressBar, + pb.WithConstLeft(" Run "), + pb.WithConstProgress(0, "Initializing the cloud test"), + ) // The quiet option hides the progress bar and disallow aborting the test if quiet { @@ -197,15 +215,34 @@ This will execute the test on the Load Impact cloud service. 
Use "k6 login cloud signal.Notify(sigC, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) defer signal.Stop(sigC) - var progressErr error + var ( + startTime time.Time + maxDuration time.Duration + ) + maxDuration, _ = lib.GetEndOffset(executionPlan) + testProgress := &cloud.TestProgressResponse{} - progress := ui.ProgressBar{ - Width: 60, - Left: func() string { - return " " + testProgress.RunStatusText - }, - } + progressBar.Modify( + pb.WithProgress(func() (float64, []string) { + statusText := testProgress.RunStatusText + if testProgress.RunStatus == lib.RunStatusRunning { + if startTime.IsZero() { + startTime = time.Now() + } + spent := time.Since(startTime) + if spent > maxDuration { + statusText = maxDuration.String() + } else { + statusText = fmt.Sprintf("%s/%s", pb.GetFixedLengthDuration(spent, maxDuration), maxDuration) + } + } + + return testProgress.Progress, []string{statusText} + }), + ) + + var progressErr error ticker := time.NewTicker(time.Millisecond * 2000) shouldExitLoop := false @@ -218,8 +255,7 @@ This will execute the test on the Load Impact cloud service. 
Use "k6 login cloud if (testProgress.RunStatus > lib.RunStatusRunning) || (exitOnRunning && testProgress.RunStatus == lib.RunStatusRunning) { shouldExitLoop = true } - progress.Progress = testProgress.Progress - fprintf(stdout, "%s\x1b[0K\r", progress.String()) + printBar(progressBar) } else { logrus.WithError(progressErr).Error("Test progress error") } diff --git a/cmd/collectors.go b/cmd/collectors.go index 749dc2a99ab..6de6acb4972 100644 --- a/cmd/collectors.go +++ b/cmd/collectors.go @@ -27,6 +27,9 @@ import ( "gopkg.in/guregu/null.v3" "github.com/kelseyhightower/envconfig" + "github.com/pkg/errors" + "github.com/spf13/afero" + "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/consts" "github.com/loadimpact/k6/loader" @@ -39,8 +42,6 @@ import ( "github.com/loadimpact/k6/stats/kafka" "github.com/loadimpact/k6/stats/statsd" "github.com/loadimpact/k6/stats/statsd/common" - "github.com/pkg/errors" - "github.com/spf13/afero" ) const ( @@ -66,7 +67,9 @@ func parseCollector(s string) (t, arg string) { } //TODO: totally refactor this... 
-func getCollector(collectorName, arg string, src *loader.SourceData, conf Config) (lib.Collector, error) { +func getCollector( + collectorName, arg string, src *loader.SourceData, conf Config, executionPlan []lib.ExecutionStep, +) (lib.Collector, error) { switch collectorName { case collectorJSON: return jsonc.New(afero.NewOsFs(), arg) @@ -89,7 +92,7 @@ func getCollector(collectorName, arg string, src *loader.SourceData, conf Config if arg != "" { config.Name = null.StringFrom(arg) } - return cloud.New(config, src, conf.Options, consts.Version) + return cloud.New(config, src, conf.Options, executionPlan, consts.Version) case collectorKafka: config := kafka.NewConfig().Apply(conf.Collectors.Kafka) if err := envconfig.Process("", &config); err != nil { @@ -135,8 +138,10 @@ func getCollector(collectorName, arg string, src *loader.SourceData, conf Config } } -func newCollector(collectorName, arg string, src *loader.SourceData, conf Config) (lib.Collector, error) { - collector, err := getCollector(collectorName, arg, src, conf) +func newCollector( + collectorName, arg string, src *loader.SourceData, conf Config, executionPlan []lib.ExecutionStep, +) (lib.Collector, error) { + collector, err := getCollector(collectorName, arg, src, conf, executionPlan) if err != nil { return collector, err } diff --git a/cmd/common.go b/cmd/common.go index df2745f3b13..aa4fcce03f5 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -21,17 +21,16 @@ package cmd import ( - "bytes" "fmt" "io" "os" - "sync" - "github.com/loadimpact/k6/lib/types" "github.com/spf13/afero" "github.com/spf13/cobra" "github.com/spf13/pflag" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib/types" ) // Use these when interacting with fs and writing to terminal, makes a command testable @@ -53,23 +52,6 @@ type ExitCode struct { Hint string } -// A writer that syncs writes with a mutex and, if the output is a TTY, clears before newlines. 
-type consoleWriter struct { - Writer io.Writer - IsTTY bool - Mutex *sync.Mutex -} - -func (w consoleWriter) Write(p []byte) (n int, err error) { - if w.IsTTY { - p = bytes.Replace(p, []byte{'\n'}, []byte{'\x1b', '[', '0', 'K', '\n'}, -1) - } - w.Mutex.Lock() - n, err = w.Writer.Write(p) - w.Mutex.Unlock() - return -} - //TODO: refactor the CLI config so these functions aren't needed - they // can mask errors by failing only at runtime, not at compile time func getNullBool(flags *pflag.FlagSet, key string) null.Bool { diff --git a/cmd/config.go b/cmd/config.go index 33d83ee90aa..8d635548b42 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -29,14 +29,12 @@ import ( "strings" "github.com/kelseyhightower/envconfig" - "github.com/sirupsen/logrus" "github.com/spf13/afero" "github.com/spf13/pflag" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" "github.com/loadimpact/k6/lib" - "github.com/loadimpact/k6/lib/scheduler" - "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/lib/executor" "github.com/loadimpact/k6/stats" "github.com/loadimpact/k6/stats/cloud" "github.com/loadimpact/k6/stats/csv" @@ -84,6 +82,15 @@ type Config struct { } `json:"collectors"` } +// Validate checks if all of the specified options make sense +func (c Config) Validate() []error { + errors := c.Options.Validate() + //TODO: validate all of the other options... that we should have already been validating... 
+ //TODO: maybe integrate an external validation lib: https://github.com/avelino/awesome-go#validation + + return errors +} + func (c Config) Apply(cfg Config) Config { c.Options = c.Options.Apply(cfg.Options) if len(cfg.Out) > 0 { @@ -197,97 +204,6 @@ func readEnvConfig() (conf Config, err error) { return conf, nil } -type executionConflictConfigError string - -func (e executionConflictConfigError) Error() string { - return string(e) -} - -var _ error = executionConflictConfigError("") - -func getConstantLoopingVUsExecution(duration types.NullDuration, vus null.Int) scheduler.ConfigMap { - ds := scheduler.NewConstantLoopingVUsConfig(lib.DefaultSchedulerName) - ds.VUs = vus - ds.Duration = duration - return scheduler.ConfigMap{lib.DefaultSchedulerName: ds} -} - -func getVariableLoopingVUsExecution(stages []lib.Stage, startVUs null.Int) scheduler.ConfigMap { - ds := scheduler.NewVariableLoopingVUsConfig(lib.DefaultSchedulerName) - ds.StartVUs = startVUs - for _, s := range stages { - if s.Duration.Valid { - ds.Stages = append(ds.Stages, scheduler.Stage{Duration: s.Duration, Target: s.Target}) - } - } - return scheduler.ConfigMap{lib.DefaultSchedulerName: ds} -} - -func getSharedIterationsExecution(iterations null.Int, duration types.NullDuration, vus null.Int) scheduler.ConfigMap { - ds := scheduler.NewSharedIterationsConfig(lib.DefaultSchedulerName) - ds.VUs = vus - ds.Iterations = iterations - if duration.Valid { - ds.MaxDuration = duration - } - return scheduler.ConfigMap{lib.DefaultSchedulerName: ds} -} - -// This checks for conflicting options and turns any shortcut options (i.e. 
duration, iterations, -// stages) into the proper scheduler configuration -func deriveExecutionConfig(conf Config) (Config, error) { - result := conf - switch { - case conf.Iterations.Valid: - if len(conf.Stages) > 0 { // stages isn't nil (not set) and isn't explicitly set to empty - //TODO: make this an executionConflictConfigError in the next version - logrus.Warn("Specifying both iterations and stages is deprecated and won't be supported in the future k6 versions") - } - - result.Execution = getSharedIterationsExecution(conf.Iterations, conf.Duration, conf.VUs) - // TODO: maybe add a new flag that will be used as a shortcut to per-VU iterations? - - case conf.Duration.Valid: - if len(conf.Stages) > 0 { // stages isn't nil (not set) and isn't explicitly set to empty - //TODO: make this an executionConflictConfigError in the next version - logrus.Warn("Specifying both duration and stages is deprecated and won't be supported in the future k6 versions") - } - - if conf.Duration.Duration <= 0 { - //TODO: make this an executionConflictConfigError in the next version - msg := "Specifying infinite duration in this way is deprecated and won't be supported in the future k6 versions" - logrus.Warn(msg) - } else { - result.Execution = getConstantLoopingVUsExecution(conf.Duration, conf.VUs) - } - - case len(conf.Stages) > 0: // stages isn't nil (not set) and isn't explicitly set to empty - result.Execution = getVariableLoopingVUsExecution(conf.Stages, conf.VUs) - - default: - if conf.Execution != nil { // If someone set this, regardless if its empty - //TODO: remove this warning in the next version - logrus.Warn("The execution settings are not functional in this k6 release, they will be ignored") - } - - if len(conf.Execution) == 0 { // If unset or set to empty - // No execution parameters whatsoever were specified, so we'll create a per-VU iterations config - // with 1 VU and 1 iteration. 
We're choosing the per-VU config, since that one could also - // be executed both locally, and in the cloud. - result.Execution = scheduler.ConfigMap{ - lib.DefaultSchedulerName: scheduler.NewPerVUIterationsConfig(lib.DefaultSchedulerName), - } - } - } - - //TODO: validate the config; questions: - // - separately validate the duration, iterations and stages for better error messages? - // - or reuse the execution validation somehow, at the end? or something mixed? - // - here or in getConsolidatedConfig() or somewhere else? - - return result, nil -} - // Assemble the final consolidated configuration from all of the different sources: // - start with the CLI-provided options to get shadowed (non-Valid) defaults in there // - add the global file config options @@ -346,18 +262,24 @@ func applyDefault(conf Config) Config { return conf } -func deriveAndValidateConfig(conf Config) (Config, error) { - result, err := deriveExecutionConfig(conf) +func deriveAndValidateConfig(conf Config, isExecutable func(string) bool) (result Config, err error) { + result = conf + result.Options, err = executor.DeriveScenariosFromShortcuts(conf.Options) if err != nil { return result, err } - return result, validateConfig(conf) + return result, validateConfig(result, isExecutable) } -//TODO: remove ↓ -//nolint:unparam -func validateConfig(conf Config) error { +func validateConfig(conf Config, isExecutable func(string) bool) error { errList := conf.Validate() + + for _, ec := range conf.Scenarios { + if err := validateScenarioConfig(ec, isExecutable); err != nil { + errList = append(errList, err) + } + } + if len(errList) == 0 { return nil } @@ -366,9 +288,14 @@ func validateConfig(conf Config) error { for _, err := range errList { errMsgParts = append(errMsgParts, fmt.Sprintf("\t- %s", err.Error())) } - errMsg := errors.New(strings.Join(errMsgParts, "\n")) - //TODO: actually return the error here instead of warning, so k6 aborts on config validation errors - logrus.Warn(errMsg) + return 
errors.New(strings.Join(errMsgParts, "\n")) +} + +func validateScenarioConfig(conf lib.ExecutorConfig, isExecutable func(string) bool) error { + execFn := conf.GetExec() + if !isExecutable(execFn) { + return fmt.Errorf("executor %s: function '%s' not found in exports", conf.GetName(), execFn) + } return nil } diff --git a/cmd/config_consolidation_test.go b/cmd/config_consolidation_test.go index d733e4afd11..b11763c7197 100644 --- a/cmd/config_consolidation_test.go +++ b/cmd/config_consolidation_test.go @@ -21,10 +21,8 @@ package cmd import ( "fmt" - "io" "io/ioutil" "os" - "strings" "testing" "time" @@ -33,53 +31,22 @@ import ( "github.com/spf13/pflag" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" "github.com/loadimpact/k6/lib" - "github.com/loadimpact/k6/lib/scheduler" + "github.com/loadimpact/k6/lib/executor" "github.com/loadimpact/k6/lib/testutils" + "github.com/loadimpact/k6/lib/testutils/minirunner" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/stats" ) -// A helper funcion for setting arbitrary environment variables and -// restoring the old ones at the end, usually by deferring the returned callback -//TODO: remove these hacks when we improve the configuration... we shouldn't -// have to mess with the global environment at all... 
-func setEnv(t *testing.T, newEnv []string) (restoreEnv func()) { - actuallSetEnv := func(env []string, abortOnSetErr bool) { - os.Clearenv() - for _, e := range env { - val := "" - pair := strings.SplitN(e, "=", 2) - if len(pair) > 1 { - val = pair[1] - } - err := os.Setenv(pair[0], val) - if abortOnSetErr { - require.NoError(t, err) - } else if err != nil { - t.Logf( - "Received a non-aborting but unexpected error '%s' when setting env.var '%s' to '%s'", - err, pair[0], val, - ) - } - } - } - oldEnv := os.Environ() - actuallSetEnv(newEnv, true) - - return func() { - actuallSetEnv(oldEnv, false) - } -} - func verifyOneIterPerOneVU(t *testing.T, c Config) { - // No config anywhere should result in a 1 VU with a 1 uninterruptible iteration config - sched := c.Execution[lib.DefaultSchedulerName] - require.NotEmpty(t, sched) - require.IsType(t, scheduler.PerVUIteationsConfig{}, sched) - perVuIters, ok := sched.(scheduler.PerVUIteationsConfig) + // No config anywhere should result in a 1 VU with a 1 iteration config + exec := c.Scenarios[lib.DefaultScenarioName] + require.NotEmpty(t, exec) + require.IsType(t, executor.PerVUIterationsConfig{}, exec) + perVuIters, ok := exec.(executor.PerVUIterationsConfig) require.True(t, ok) assert.Equal(t, null.NewInt(1, false), perVuIters.Iterations) assert.Equal(t, null.NewInt(1, false), perVuIters.VUs) @@ -87,10 +54,10 @@ func verifyOneIterPerOneVU(t *testing.T, c Config) { func verifySharedIters(vus, iters null.Int) func(t *testing.T, c Config) { return func(t *testing.T, c Config) { - sched := c.Execution[lib.DefaultSchedulerName] - require.NotEmpty(t, sched) - require.IsType(t, scheduler.SharedIteationsConfig{}, sched) - sharedIterConfig, ok := sched.(scheduler.SharedIteationsConfig) + exec := c.Scenarios[lib.DefaultScenarioName] + require.NotEmpty(t, exec) + require.IsType(t, executor.SharedIterationsConfig{}, exec) + sharedIterConfig, ok := exec.(executor.SharedIterationsConfig) require.True(t, ok) assert.Equal(t, vus, 
sharedIterConfig.VUs) assert.Equal(t, iters, sharedIterConfig.Iterations) @@ -101,10 +68,10 @@ func verifySharedIters(vus, iters null.Int) func(t *testing.T, c Config) { func verifyConstLoopingVUs(vus null.Int, duration time.Duration) func(t *testing.T, c Config) { return func(t *testing.T, c Config) { - sched := c.Execution[lib.DefaultSchedulerName] - require.NotEmpty(t, sched) - require.IsType(t, scheduler.ConstantLoopingVUsConfig{}, sched) - clvc, ok := sched.(scheduler.ConstantLoopingVUsConfig) + exec := c.Scenarios[lib.DefaultScenarioName] + require.NotEmpty(t, exec) + require.IsType(t, executor.ConstantVUsConfig{}, exec) + clvc, ok := exec.(executor.ConstantVUsConfig) require.True(t, ok) assert.Equal(t, vus, clvc.VUs) assert.Equal(t, types.NullDurationFrom(duration), clvc.Duration) @@ -113,12 +80,12 @@ func verifyConstLoopingVUs(vus null.Int, duration time.Duration) func(t *testing } } -func verifyVarLoopingVUs(startVus null.Int, stages []scheduler.Stage) func(t *testing.T, c Config) { +func verifyRampingVUs(startVus null.Int, stages []executor.Stage) func(t *testing.T, c Config) { return func(t *testing.T, c Config) { - sched := c.Execution[lib.DefaultSchedulerName] - require.NotEmpty(t, sched) - require.IsType(t, scheduler.VariableLoopingVUsConfig{}, sched) - clvc, ok := sched.(scheduler.VariableLoopingVUsConfig) + exec := c.Scenarios[lib.DefaultScenarioName] + require.NotEmpty(t, exec) + require.IsType(t, executor.RampingVUsConfig{}, exec) + clvc, ok := exec.(executor.RampingVUsConfig) require.True(t, ok) assert.Equal(t, startVus, clvc.StartVUs) assert.Equal(t, startVus, c.VUs) @@ -133,14 +100,14 @@ func verifyVarLoopingVUs(startVus null.Int, stages []scheduler.Stage) func(t *te // A helper function that accepts (duration in second, VUs) pairs and returns // a valid slice of stage structs -func buildStages(durationsAndVUs ...int64) []scheduler.Stage { +func buildStages(durationsAndVUs ...int64) []executor.Stage { l := len(durationsAndVUs) if l%2 != 0 { 
panic("wrong len") } - result := make([]scheduler.Stage, 0, l/2) + result := make([]executor.Stage, 0, l/2) for i := 0; i < l; i += 2 { - result = append(result, scheduler.Stage{ + result = append(result, executor.Stage{ Duration: types.NullDurationFrom(time.Duration(durationsAndVUs[i]) * time.Second), Target: null.IntFrom(durationsAndVUs[i+1]), }) @@ -149,7 +116,7 @@ func buildStages(durationsAndVUs ...int64) []scheduler.Stage { } func mostFlagSets() []flagSetInit { - //TODO: make this unnecessary... currently these are the only commands in which + // TODO: make this unnecessary... currently these are the only commands in which // getConsolidatedConfig() is used, but they also have differences in their CLI flags :/ // sigh... compromises... result := []flagSetInit{} @@ -189,7 +156,7 @@ type opts struct { runner *lib.Options fs afero.Fs - //TODO: remove this when the configuration is more reproducible and sane... + // TODO: remove this when the configuration is more reproducible and sane... // We use a func, because initializing a FlagSet that points to variables // actually will change those variables to their default values :| In our // case, this happens only some of the time, for global variables that @@ -203,25 +170,12 @@ type opts struct { } func resetStickyGlobalVars() { - //TODO: remove after fixing the config, obviously a dirty hack + // TODO: remove after fixing the config, obviously a dirty hack exitOnRunning = false configFilePath = "" runType = "" - runNoSetup = false - runNoTeardown = false } -// Something that makes the test also be a valid io.Writer, useful for passing it -// as an output for logs and CLI flag help messages... -type testOutput struct{ *testing.T } - -func (to testOutput) Write(p []byte) (n int, err error) { - to.Logf("%s", p) - return len(p), nil -} - -var _ io.Writer = testOutput{} - // exp contains the different events or errors we expect our test case to trigger. 
// for space and clarity, we use the fact that by default, all of the struct values are false type exp struct { @@ -230,7 +184,7 @@ type exp struct { consolidationError bool derivationError bool validationErrors bool - logWarning bool //TODO: remove in the next version? + logWarning bool } // A hell of a complicated test case, that still doesn't test things fully... @@ -261,26 +215,25 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { {opts{cli: []string{"-u", "4", "--duration", "60s"}}, exp{}, verifyConstLoopingVUs(I(4), 1*time.Minute)}, { opts{cli: []string{"--stage", "20s:10", "-s", "3m:5"}}, exp{}, - verifyVarLoopingVUs(null.NewInt(1, false), buildStages(20, 10, 180, 5)), + verifyRampingVUs(null.NewInt(1, false), buildStages(20, 10, 180, 5)), }, { opts{cli: []string{"-s", "1m6s:5", "--vus", "10"}}, exp{}, - verifyVarLoopingVUs(null.NewInt(10, true), buildStages(66, 5)), + verifyRampingVUs(null.NewInt(10, true), buildStages(66, 5)), }, {opts{cli: []string{"-u", "1", "-i", "6", "-d", "10s"}}, exp{}, func(t *testing.T, c Config) { verifySharedIters(I(1), I(6))(t, c) - sharedIterConfig := c.Execution[lib.DefaultSchedulerName].(scheduler.SharedIteationsConfig) + sharedIterConfig := c.Scenarios[lib.DefaultScenarioName].(executor.SharedIterationsConfig) assert.Equal(t, time.Duration(sharedIterConfig.MaxDuration.Duration), 10*time.Second) }}, // This should get a validation error since VUs are more than the shared iterations {opts{cli: []string{"--vus", "10", "-i", "6"}}, exp{validationErrors: true}, verifySharedIters(I(10), I(6))}, {opts{cli: []string{"-s", "10s:5", "-s", "10s:"}}, exp{validationErrors: true}, nil}, {opts{fs: defaultConfig(`{"stages": [{"duration": "20s"}], "vus": 10}`)}, exp{validationErrors: true}, nil}, - // These should emit a warning - //TODO: in next version, those should be an error - {opts{cli: []string{"-u", "2", "-d", "10s", "-s", "10s:20"}}, exp{logWarning: true}, nil}, - {opts{cli: []string{"-u", "3", "-i", "5", 
"-s", "10s:20"}}, exp{logWarning: true}, nil}, - {opts{cli: []string{"-u", "3", "-d", "0"}}, exp{logWarning: true}, nil}, + // These should emit a consolidation error + {opts{cli: []string{"-u", "2", "-d", "10s", "-s", "10s:20"}}, exp{derivationError: true}, nil}, + {opts{cli: []string{"-u", "3", "-i", "5", "-s", "10s:20"}}, exp{derivationError: true}, nil}, + {opts{cli: []string{"-u", "3", "-d", "0"}}, exp{derivationError: true}, nil}, { opts{runner: &lib.Options{ VUs: null.IntFrom(5), @@ -288,19 +241,19 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { Stages: []lib.Stage{ {Duration: types.NullDurationFrom(3 * time.Second), Target: I(20)}, }, - }}, exp{logWarning: true}, nil, + }}, exp{derivationError: true}, nil, }, - {opts{fs: defaultConfig(`{"execution": {}}`)}, exp{logWarning: true}, verifyOneIterPerOneVU}, + {opts{fs: defaultConfig(`{"scenarios": {}}`)}, exp{logWarning: true}, verifyOneIterPerOneVU}, // Test if environment variable shortcuts are working as expected {opts{env: []string{"K6_VUS=5", "K6_ITERATIONS=15"}}, exp{}, verifySharedIters(I(5), I(15))}, {opts{env: []string{"K6_VUS=10", "K6_DURATION=20s"}}, exp{}, verifyConstLoopingVUs(I(10), 20*time.Second)}, { opts{env: []string{"K6_STAGES=2m30s:11,1h1m:100"}}, exp{}, - verifyVarLoopingVUs(null.NewInt(1, false), buildStages(150, 11, 3660, 100)), + verifyRampingVUs(null.NewInt(1, false), buildStages(150, 11, 3660, 100)), }, { opts{env: []string{"K6_STAGES=100s:100,0m30s:0", "K6_VUS=0"}}, exp{}, - verifyVarLoopingVUs(null.NewInt(0, true), buildStages(100, 100, 30, 0)), + verifyRampingVUs(null.NewInt(0, true), buildStages(100, 100, 30, 0)), }, // Test if JSON configs work as expected {opts{fs: defaultConfig(`{"iterations": 77, "vus": 7}`)}, exp{}, verifySharedIters(I(7), I(77))}, @@ -308,6 +261,8 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { {opts{fs: getFS(nil), cli: []string{"--config", "/my/config.file"}}, exp{consolidationError: true}, nil}, // Test 
combinations between options and levels + {opts{cli: []string{"--vus", "1"}}, exp{}, verifyOneIterPerOneVU}, + {opts{cli: []string{"--vus", "10"}}, exp{logWarning: true}, verifyOneIterPerOneVU}, { opts{ fs: getFS([]file{{"/my/config.file", `{"vus": 8, "duration": "2m"}`}}), @@ -320,15 +275,14 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { env: []string{"K6_DURATION=15s"}, cli: []string{"--stage", ""}, }, - exp{}, verifyConstLoopingVUs(I(10), 15*time.Second), + exp{logWarning: true}, verifyOneIterPerOneVU, }, { opts{ runner: &lib.Options{VUs: null.IntFrom(5), Duration: types.NullDurationFrom(50 * time.Second)}, cli: []string{"--stage", "5s:5"}, }, - //TODO: this shouldn't be a warning in the next version, but the result will be different - exp{logWarning: true}, verifyConstLoopingVUs(I(5), 50*time.Second), + exp{}, verifyRampingVUs(I(5), buildStages(5, 5)), }, { opts{ @@ -336,16 +290,16 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { runner: &lib.Options{VUs: null.IntFrom(5)}, }, exp{}, - verifyVarLoopingVUs(null.NewInt(5, true), buildStages(20, 10)), + verifyRampingVUs(I(5), buildStages(20, 10)), }, { opts{ fs: defaultConfig(`{"stages": [{"duration": "20s", "target": 10}]}`), runner: &lib.Options{VUs: null.IntFrom(5)}, - env: []string{"K6_VUS=15", "K6_ITERATIONS=15"}, + env: []string{"K6_VUS=15", "K6_ITERATIONS=17"}, }, - exp{logWarning: true}, //TODO: this won't be a warning in the next version, but the result will be different - verifySharedIters(I(15), I(15)), + exp{}, + verifySharedIters(I(15), I(17)), }, { opts{ @@ -355,40 +309,26 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { cli: []string{"--stage", "44s:44", "-s", "55s:55"}, }, exp{}, - verifyVarLoopingVUs(null.NewInt(33, true), buildStages(44, 44, 55, 55)), + verifyRampingVUs(null.NewInt(33, true), buildStages(44, 44, 55, 55)), }, - //TODO: test the future full overwriting of the duration/iterations/stages/execution options + 
// TODO: test the future full overwriting of the duration/iterations/stages/execution options { opts{ fs: defaultConfig(`{ - "execution": { "someKey": { - "type": "constant-looping-vus", "vus": 10, "duration": "60s", "interruptible": false, - "iterationTimeout": "10s", "startTime": "70s", "env": {"test": "mest"}, "exec": "someFunc" + "scenarios": { "someKey": { + "executor": "constant-vus", "vus": 10, "duration": "60s", "gracefulStop": "10s", + "startTime": "70s", "env": {"test": "mest"}, "exec": "someFunc" }}}`), env: []string{"K6_ITERATIONS=25"}, cli: []string{"--vus", "12"}, }, exp{}, verifySharedIters(I(12), I(25)), }, - { - opts{ - fs: defaultConfig(` - { - "execution": { - "default": { - "type": "constant-looping-vus", - "vus": 10, - "duration": "60s" - } - }, - "vus": 10, - "duration": "60s" - }`, - ), - }, - exp{}, verifyConstLoopingVUs(I(10), 60*time.Second), - }, + + // TODO: test the externally controlled executor + // TODO: test execution-segment + // Just in case, verify that no options will result in the same 1 vu 1 iter config {opts{}, exp{}, verifyOneIterPerOneVU}, @@ -400,8 +340,10 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { assert.Equal(t, stats.SystemTagSet(0), *c.Options.SystemTags) }}, { - opts{runner: &lib.Options{ - SystemTags: stats.NewSystemTagSet(stats.TagSubproto, stats.TagURL)}, + opts{ + runner: &lib.Options{ + SystemTags: stats.NewSystemTagSet(stats.TagSubproto, stats.TagURL), + }, }, exp{}, func(t *testing.T, c Config) { @@ -433,8 +375,8 @@ func getConfigConsolidationTestCases() []configConsolidationTestCase { assert.Equal(t, []string{"avg", "p(90)", "count"}, c.Options.SummaryTrendStats) }, }, - //TODO: test for differences between flagsets - //TODO: more tests in general, especially ones not related to execution parameters... + // TODO: test for differences between flagsets + // TODO: more tests in general, especially ones not related to execution parameters... 
} } @@ -445,16 +387,17 @@ func runTestCase( logHook *testutils.SimpleLogrusHook, ) { t.Logf("Test with opts=%#v and exp=%#v\n", testCase.options, testCase.expected) - logrus.SetOutput(testOutput{t}) + output := testutils.NewTestOutput(t) + logrus.SetOutput(output) logHook.Drain() - restoreEnv := setEnv(t, testCase.options.env) + restoreEnv := testutils.SetEnv(t, testCase.options.env) defer restoreEnv() flagSet := newFlagSet() defer resetStickyGlobalVars() - flagSet.SetOutput(testOutput{t}) - //flagSet.PrintDefaults() + flagSet.SetOutput(output) + // flagSet.PrintDefaults() cliErr := flagSet.Parse(testCase.options.cli) if testCase.expected.cliParseError { @@ -463,7 +406,7 @@ func runTestCase( } require.NoError(t, cliErr) - //TODO: remove these hacks when we improve the configuration... + // TODO: remove these hacks when we improve the configuration... var cliConf Config if flagSet.Lookup("out") != nil { cliConf, cliErr = getConfig(flagSet) @@ -479,7 +422,7 @@ func runTestCase( var runner lib.Runner if testCase.options.runner != nil { - runner = &lib.MiniRunner{Options: *testCase.options.runner} + runner = &minirunner.MiniRunner{Options: *testCase.options.runner} } if testCase.options.fs == nil { t.Logf("Creating an empty FS for this test") @@ -493,7 +436,8 @@ func runTestCase( } require.NoError(t, err) - derivedConfig, err := deriveExecutionConfig(consolidatedConfig) + derivedConfig := consolidatedConfig + derivedConfig.Options, err = executor.DeriveScenariosFromShortcuts(consolidatedConfig.Options) if testCase.expected.derivationError { require.Error(t, err) return diff --git a/cmd/config_test.go b/cmd/config_test.go index 37481b99912..5a4595307ed 100644 --- a/cmd/config_test.go +++ b/cmd/config_test.go @@ -21,12 +21,18 @@ package cmd import ( - "os" + "fmt" "testing" + "time" "github.com/kelseyhightower/envconfig" "github.com/stretchr/testify/assert" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/executor" + 
"github.com/loadimpact/k6/lib/testutils" + "github.com/loadimpact/k6/lib/types" ) type testCmdData struct { @@ -41,7 +47,6 @@ type testCmdTest struct { } func TestConfigCmd(t *testing.T) { - testdata := []testCmdData{ { Name: "Out", @@ -101,11 +106,13 @@ func TestConfigEnv(t *testing.T) { }, } for field, data := range testdata { - os.Clearenv() + field, data := field, data t.Run(field.Name, func(t *testing.T) { for value, fn := range data { + value, fn := value, fn t.Run(`"`+value+`"`, func(t *testing.T) { - assert.NoError(t, os.Setenv(field.Key, value)) + restore := testutils.SetEnv(t, []string{fmt.Sprintf("%s=%s", field.Key, value)}) + defer restore() var config Config assert.NoError(t, envconfig.Process("", &config)) fn(config) @@ -132,3 +139,48 @@ func TestConfigApply(t *testing.T) { assert.Equal(t, []string{"influxdb", "json"}, conf.Out) }) } + +func TestDeriveAndValidateConfig(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + conf Config + isExec bool + err string + }{ + {"defaultOK", Config{}, true, ""}, + {"defaultErr", Config{}, false, + "executor default: function 'default' not found in exports"}, + {"nonDefaultOK", Config{Options: lib.Options{Scenarios: lib.ScenarioConfigs{ + "per_vu_iters": executor.PerVUIterationsConfig{BaseConfig: executor.BaseConfig{ + Name: "per_vu_iters", Type: "per-vu-iterations", Exec: null.StringFrom("nonDefault")}, + VUs: null.IntFrom(1), + Iterations: null.IntFrom(1), + MaxDuration: types.NullDurationFrom(time.Second), + }}}}, true, "", + }, + {"nonDefaultErr", Config{Options: lib.Options{Scenarios: lib.ScenarioConfigs{ + "per_vu_iters": executor.PerVUIterationsConfig{BaseConfig: executor.BaseConfig{ + Name: "per_vu_iters", Type: "per-vu-iterations", Exec: null.StringFrom("nonDefaultErr")}, + VUs: null.IntFrom(1), + Iterations: null.IntFrom(1), + MaxDuration: types.NullDurationFrom(time.Second), + }}}}, false, + "executor per_vu_iters: function 'nonDefaultErr' not found in exports", + }, + } + + for _, 
tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + _, err := deriveAndValidateConfig(tc.conf, + func(_ string) bool { return tc.isExec }) + if tc.err != "" { + assert.Contains(t, err.Error(), tc.err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/cmd/configdir_go112.go b/cmd/configdir_go112.go deleted file mode 100644 index 4b1eae33f2b..00000000000 --- a/cmd/configdir_go112.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build !go1.13 - -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- * - */ - -package cmd - -import ( - "errors" - "os" - "runtime" -) - -// This code is copied from os.UserConfigDir() as of go1.13 -// https://github.com/golang/go/blob/release-branch.go1.13/src/os/file.go#L419 -func configDir() (string, error) { - var dir string - - switch runtime.GOOS { - case "windows": - dir = os.Getenv("AppData") - if dir == "" { - return "", errors.New("%AppData% is not defined") - } - - case "darwin": - dir = os.Getenv("HOME") - if dir == "" { - return "", errors.New("$HOME is not defined") - } - dir += "/Library/Application Support" - - case "plan9": - dir = os.Getenv("home") - if dir == "" { - return "", errors.New("$home is not defined") - } - dir += "/lib" - - default: // Unix - dir = os.Getenv("XDG_CONFIG_HOME") - if dir == "" { - dir = os.Getenv("HOME") - if dir == "" { - return "", errors.New("neither $XDG_CONFIG_HOME nor $HOME are defined") - } - dir += "/.config" - } - } - - return dir, nil -} diff --git a/cmd/convert.go b/cmd/convert.go index 5797109f8da..6c77fd758b3 100644 --- a/cmd/convert.go +++ b/cmd/convert.go @@ -26,10 +26,11 @@ import ( "io/ioutil" "path/filepath" + "github.com/spf13/cobra" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/converter/har" "github.com/loadimpact/k6/lib" - "github.com/spf13/cobra" - null "gopkg.in/guregu/null.v3" ) var ( diff --git a/cmd/convert_test.go b/cmd/convert_test.go index 98d54c401e8..7943988f004 100644 --- a/cmd/convert_test.go +++ b/cmd/convert_test.go @@ -22,15 +22,15 @@ package cmd import ( "bytes" - "os" + "io/ioutil" + "path/filepath" "regexp" "testing" - "io/ioutil" - "github.com/pmezard/go-difflib/difflib" "github.com/spf13/afero" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const testHAR = ` @@ -122,24 +122,19 @@ export default function() { ` func TestIntegrationConvertCmd(t *testing.T) { - var tmpFile, err = ioutil.TempFile("", "") - if err != nil { - t.Fatalf("Couldn't create temporary file: %s", err) - } - harFile := tmpFile.Name() - 
defer os.Remove(harFile) - tmpFile.Close() t.Run("Correlate", func(t *testing.T) { + harFile, err := filepath.Abs("correlate.har") + require.NoError(t, err) har, err := ioutil.ReadFile("testdata/example.har") - assert.NoError(t, err) + require.NoError(t, err) expectedTestPlan, err := ioutil.ReadFile("testdata/example.js") - assert.NoError(t, err) + require.NoError(t, err) defaultFs = afero.NewMemMapFs() err = afero.WriteFile(defaultFs, harFile, har, 0644) - assert.NoError(t, err) + require.NoError(t, err) buf := &bytes.Buffer{} defaultWriter = buf @@ -178,8 +173,10 @@ func TestIntegrationConvertCmd(t *testing.T) { } }) t.Run("Stdout", func(t *testing.T) { + harFile, err := filepath.Abs("stdout.har") + require.NoError(t, err) defaultFs = afero.NewMemMapFs() - err := afero.WriteFile(defaultFs, harFile, []byte(testHAR), 0644) + err = afero.WriteFile(defaultFs, harFile, []byte(testHAR), 0644) assert.NoError(t, err) buf := &bytes.Buffer{} @@ -190,11 +187,16 @@ func TestIntegrationConvertCmd(t *testing.T) { assert.Equal(t, testHARConvertResult, buf.String()) }) t.Run("Output file", func(t *testing.T) { + harFile, err := filepath.Abs("output.har") + require.NoError(t, err) defaultFs = afero.NewMemMapFs() - err := afero.WriteFile(defaultFs, harFile, []byte(testHAR), 0644) + err = afero.WriteFile(defaultFs, harFile, []byte(testHAR), 0644) assert.NoError(t, err) err = convertCmd.Flags().Set("output", "/output.js") + defer func() { + err = convertCmd.Flags().Set("output", "") + }() assert.NoError(t, err) err = convertCmd.RunE(convertCmd, []string{harFile}) assert.NoError(t, err) diff --git a/cmd/inspect.go b/cmd/inspect.go index c48221d5cf1..6b0aac06d83 100644 --- a/cmd/inspect.go +++ b/cmd/inspect.go @@ -26,10 +26,11 @@ import ( "fmt" "os" + "github.com/spf13/cobra" + "github.com/loadimpact/k6/js" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/loader" - "github.com/spf13/cobra" ) // inspectCmd represents the resume command @@ -54,7 +55,7 @@ var inspectCmd = 
&cobra.Command{ typ = detectType(src.Data) } - runtimeOptions, err := getRuntimeOptions(cmd.Flags()) + runtimeOptions, err := getRuntimeOptions(cmd.Flags(), buildEnvMap(os.Environ())) if err != nil { return err } diff --git a/cmd/login_cloud.go b/cmd/login_cloud.go index c43545c7847..453743ad5a1 100644 --- a/cmd/login_cloud.go +++ b/cmd/login_cloud.go @@ -23,14 +23,14 @@ package cmd import ( "os" + "github.com/pkg/errors" + "github.com/spf13/afero" + "github.com/spf13/cobra" "gopkg.in/guregu/null.v3" "github.com/loadimpact/k6/lib/consts" "github.com/loadimpact/k6/stats/cloud" "github.com/loadimpact/k6/ui" - "github.com/pkg/errors" - "github.com/spf13/afero" - "github.com/spf13/cobra" ) // loginCloudCommand represents the 'login cloud' command diff --git a/cmd/login_influxdb.go b/cmd/login_influxdb.go index 134e471fdaf..44de72c141c 100644 --- a/cmd/login_influxdb.go +++ b/cmd/login_influxdb.go @@ -24,12 +24,13 @@ import ( "os" "time" - "github.com/loadimpact/k6/lib/types" - "github.com/loadimpact/k6/stats/influxdb" - "github.com/loadimpact/k6/ui" "github.com/mitchellh/mapstructure" "github.com/spf13/afero" "github.com/spf13/cobra" + + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats/influxdb" + "github.com/loadimpact/k6/ui" ) // loginInfluxDBCommand represents the 'login influxdb' command diff --git a/cmd/options.go b/cmd/options.go index decdfe453d2..78480102526 100644 --- a/cmd/options.go +++ b/cmd/options.go @@ -25,14 +25,15 @@ import ( "strings" "time" + "github.com/pkg/errors" + "github.com/spf13/pflag" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/consts" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/stats" "github.com/loadimpact/k6/ui" - "github.com/pkg/errors" - "github.com/spf13/pflag" - null "gopkg.in/guregu/null.v3" ) var ( @@ -45,11 +46,19 @@ func optionFlagSet() *pflag.FlagSet { flags := pflag.NewFlagSet("", 0) flags.SortFlags = false flags.Int64P("vus", "u", 1, "number 
of virtual users") + + // TODO: delete in a few versions flags.Int64P("max", "m", 0, "max available virtual users") + _ = flags.MarkDeprecated("max", "the global MaxVUs option is obsolete and doesn't affect the k6 script execution") + flags.DurationP("duration", "d", 0, "test duration limit") flags.Int64P("iterations", "i", 0, "script total iteration limit (among all VUs)") flags.StringSliceP("stage", "s", nil, "add a `stage`, as `[duration]:[target]`") + flags.String("execution-segment", "", "limit execution to the specified segment, e.g. 10%, 1/3, 0.2:2/3") + flags.String("execution-segment-sequence", "", "the execution segment sequence") // TODO better description flags.BoolP("paused", "p", false, "start the test in a paused state") + flags.Bool("no-setup", false, "don't run setup()") + flags.Bool("no-teardown", false, "don't run teardown()") flags.Int64("max-redirects", 10, "follow at most n redirects") flags.Int64("batch", 20, "max parallel batch reqs") flags.Int64("batch-per-host", 6, "max parallel batch reqs per host") @@ -88,10 +97,11 @@ func optionFlagSet() *pflag.FlagSet { func getOptions(flags *pflag.FlagSet) (lib.Options, error) { opts := lib.Options{ VUs: getNullInt64(flags, "vus"), - VUsMax: getNullInt64(flags, "max"), Duration: getNullDuration(flags, "duration"), Iterations: getNullInt64(flags, "iterations"), Paused: getNullBool(flags, "paused"), + NoSetup: getNullBool(flags, "no-setup"), + NoTeardown: getNullBool(flags, "no-teardown"), MaxRedirects: getNullInt64(flags, "max-redirects"), Batch: getNullInt64(flags, "batch"), BatchPerHost: getNullInt64(flags, "batch-per-host"), @@ -106,8 +116,8 @@ func getOptions(flags *pflag.FlagSet) (lib.Options, error) { DiscardResponseBodies: getNullBool(flags, "discard-response-bodies"), // Default values for options without CLI flags: // TODO: find a saner and more dev-friendly and error-proof way to handle options - SetupTimeout: types.NullDuration{Duration: types.Duration(10 * time.Second), Valid: false}, - 
TeardownTimeout: types.NullDuration{Duration: types.Duration(10 * time.Second), Valid: false}, + SetupTimeout: types.NullDuration{Duration: types.Duration(60 * time.Second), Valid: false}, + TeardownTimeout: types.NullDuration{Duration: types.Duration(60 * time.Second), Valid: false}, MetricSamplesBufferSize: null.NewInt(1000, false), } @@ -131,6 +141,32 @@ func getOptions(flags *pflag.FlagSet) (lib.Options, error) { } } + if flags.Changed("execution-segment") { + executionSegmentStr, err := flags.GetString("execution-segment") + if err != nil { + return opts, err + } + segment := new(lib.ExecutionSegment) + err = segment.UnmarshalText([]byte(executionSegmentStr)) + if err != nil { + return opts, err + } + opts.ExecutionSegment = segment + } + + if flags.Changed("execution-segment-sequence") { + executionSegmentSequenceStr, err := flags.GetString("execution-segment-sequence") + if err != nil { + return opts, err + } + segmentSequence := new(lib.ExecutionSegmentSequence) + err = segmentSequence.UnmarshalText([]byte(executionSegmentSequenceStr)) + if err != nil { + return opts, err + } + opts.ExecutionSegmentSequence = segmentSequence + } + if flags.Changed("system-tags") { systemTagList, err := flags.GetStringSlice("system-tags") if err != nil { diff --git a/cmd/pause.go b/cmd/pause.go index a15eda226b0..6ecc5066546 100644 --- a/cmd/pause.go +++ b/cmd/pause.go @@ -23,11 +23,12 @@ package cmd import ( "context" + "github.com/spf13/cobra" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/api/v1" "github.com/loadimpact/k6/api/v1/client" "github.com/loadimpact/k6/ui" - "github.com/spf13/cobra" - "gopkg.in/guregu/null.v3" ) // pauseCmd represents the pause command diff --git a/cmd/resume.go b/cmd/resume.go index 93fe3bc0733..2e8183e5f19 100644 --- a/cmd/resume.go +++ b/cmd/resume.go @@ -23,11 +23,12 @@ package cmd import ( "context" + "github.com/spf13/cobra" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/api/v1" "github.com/loadimpact/k6/api/v1/client" 
"github.com/loadimpact/k6/ui" - "github.com/spf13/cobra" - "gopkg.in/guregu/null.v3" ) // resumeCmd represents the resume command diff --git a/cmd/root.go b/cmd/root.go index b648d31d59a..1929546fd06 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -40,12 +40,14 @@ import ( var BannerColor = color.New(color.FgCyan) +//TODO: remove these global variables +//nolint:gochecknoglobals var ( outMutex = &sync.Mutex{} stdoutTTY = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) stderrTTY = isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd()) - stdout = consoleWriter{colorable.NewColorableStdout(), stdoutTTY, outMutex} - stderr = consoleWriter{colorable.NewColorableStderr(), stderrTTY, outMutex} + stdout = &consoleWriter{colorable.NewColorableStdout(), stdoutTTY, outMutex, nil} + stderr = &consoleWriter{colorable.NewColorableStderr(), stderrTTY, outMutex, nil} ) const defaultConfigFileName = "config.json" @@ -75,6 +77,18 @@ var RootCmd = &cobra.Command{ PersistentPreRun: func(cmd *cobra.Command, args []string) { setupLoggers(logFmt) if noColor { + // TODO: figure out something else... currently, with the wrappers + // below, we're stripping any colors from the output after we've + // added them. The problem is that, besides being very inefficient, + // this actually also strips other special characters from the + // intended output, like the progressbar formatting ones, which + // would otherwise be fine (in a TTY). + // + // It would be much better if we avoid messing with the output and + // instead have a parametrized instance of the color library. It + // will return colored output if colors are enabled and simply + // return the passed input as-is (i.e. be a noop) if colors are + // disabled... 
stdout.Writer = colorable.NewNonColorable(os.Stdout) stderr.Writer = colorable.NewNonColorable(os.Stderr) } @@ -119,7 +133,7 @@ func rootCmdPersistentFlagSet() *pflag.FlagSet { } func init() { - confDir, err := configDir() + confDir, err := os.UserConfigDir() if err != nil { logrus.WithError(err).Warn("could not get config directory") confDir = ".config" diff --git a/cmd/run.go b/cmd/run.go index 27c775e4766..d9032bc0b5f 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -25,12 +25,11 @@ import ( "bytes" "context" "encoding/json" - "fmt" "net/http" "os" "os/signal" "runtime" - "strings" + "sync" "syscall" "time" @@ -39,7 +38,6 @@ import ( "github.com/spf13/afero" "github.com/spf13/cobra" "github.com/spf13/pflag" - null "gopkg.in/guregu/null.v3" "github.com/loadimpact/k6/api" "github.com/loadimpact/k6/core" @@ -47,9 +45,9 @@ import ( "github.com/loadimpact/k6/js" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/consts" - "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/loader" "github.com/loadimpact/k6/ui" + "github.com/loadimpact/k6/ui/pb" ) const ( @@ -62,14 +60,13 @@ const ( genericTimeoutErrorCode = 102 genericEngineErrorCode = 103 invalidConfigErrorCode = 104 + externalAbortErrorCode = 105 + cannotStartRESTAPIErrorCode = 106 ) -var ( - //TODO: fix this, global variables are not very testable... - runType = os.Getenv("K6_TYPE") - runNoSetup = os.Getenv("K6_NO_SETUP") != "" - runNoTeardown = os.Getenv("K6_NO_TEARDOWN") != "" -) +// TODO: fix this, global variables are not very testable... +//nolint:gochecknoglobals +var runType = os.Getenv("K6_TYPE") // runCmd represents the run command. var runCmd = &cobra.Command{ @@ -99,16 +96,16 @@ a commandline interface for interacting with it.`, k6 run -o influxdb=http://1.2.3.4:8086/k6`[1:], Args: exactArgsWithMsg(1, "arg should either be \"-\", if reading script from stdin, or a path to a script file"), RunE: func(cmd *cobra.Command, args []string) error { - //TODO: disable in quiet mode? 
+ // TODO: disable in quiet mode? _, _ = BannerColor.Fprintf(stdout, "\n%s\n\n", consts.Banner) - initBar := ui.ProgressBar{ - Width: 60, - Left: func() string { return " init" }, - } + initBar := pb.New( + pb.WithConstLeft(" Init"), + pb.WithConstProgress(0, "runner"), + ) + printBar(initBar) // Create the Runner. - fprintf(stdout, "%s runner\r", initBar.String()) pwd, err := os.Getwd() if err != nil { return err @@ -120,7 +117,7 @@ a commandline interface for interacting with it.`, return err } - runtimeOptions, err := getRuntimeOptions(cmd.Flags()) + runtimeOptions, err := getRuntimeOptions(cmd.Flags(), buildEnvMap(os.Environ())) if err != nil { return err } @@ -130,7 +127,7 @@ a commandline interface for interacting with it.`, return err } - fprintf(stdout, "%s options\r", initBar.String()) + modifyAndPrintBar(initBar, pb.WithConstProgress(0, "options")) cliConf, err := getConfig(cmd.Flags()) if err != nil { @@ -141,37 +138,7 @@ a commandline interface for interacting with it.`, return err } - // If -m/--max isn't specified, figure out the max that should be needed. - if !conf.VUsMax.Valid { - conf.VUsMax = null.NewInt(conf.VUs.Int64, conf.VUs.Valid) - for _, stage := range conf.Stages { - if stage.Target.Valid && stage.Target.Int64 > conf.VUsMax.Int64 { - conf.VUsMax = stage.Target - } - } - } - - // If -d/--duration, -i/--iterations and -s/--stage are all unset, run to one iteration. - if !conf.Duration.Valid && !conf.Iterations.Valid && len(conf.Stages) == 0 { - conf.Iterations = null.IntFrom(1) - } - - if conf.Iterations.Valid && conf.Iterations.Int64 < conf.VUsMax.Int64 { - logrus.Warnf( - "All iterations (%d in this test run) are shared between all VUs, so some of the %d VUs will not execute even a single iteration!", - conf.Iterations.Int64, conf.VUsMax.Int64, - ) - } - - //TODO: move a bunch of the logic above to a config "constructor" and to the Validate() method - - // If duration is explicitly set to 0, it means run forever. - //TODO: just... 
handle this differently, e.g. as a part of the manual executor - if conf.Duration.Valid && conf.Duration.Duration == 0 { - conf.Duration = types.NullDuration{} - } - - conf, cerr := deriveAndValidateConfig(conf) + conf, cerr := deriveAndValidateConfig(conf, r.IsExecutable) if cerr != nil { return ExitCode{error: cerr, Code: invalidConfigErrorCode} } @@ -181,23 +148,57 @@ a commandline interface for interacting with it.`, return err } - // Create a local executor wrapping the runner. - fprintf(stdout, "%s executor\r", initBar.String()) - ex := local.New(r) - if runNoSetup { - ex.SetRunSetup(false) - } - if runNoTeardown { - ex.SetRunTeardown(false) + // TODO: don't use a global... or maybe change the logger? + logger := logrus.StandardLogger() + + // We prepare a bunch of contexts: + // - The runCtx is cancelled as soon as the Engine's run() lambda finishes, + // and can trigger things like the usage report and end of test summary. + // Crucially, metrics processing by the Engine will still work after this + // context is cancelled! + // - The lingerCtx is cancelled by Ctrl+C, and is used to wait for that + // event when k6 was ran with the --linger option. + // - The globalCtx is cancelled only after we're completely done with the + // test execution and any --linger has been cleared, so that the Engine + // can start winding down its metrics processing. + globalCtx, globalCancel := context.WithCancel(context.Background()) + defer globalCancel() + lingerCtx, lingerCancel := context.WithCancel(globalCtx) + defer lingerCancel() + runCtx, runCancel := context.WithCancel(lingerCtx) + defer runCancel() + + // Create a local execution scheduler wrapping the runner. 
+ modifyAndPrintBar(initBar, pb.WithConstProgress(0, "execution scheduler")) + execScheduler, err := local.NewExecutionScheduler(r, logger) + if err != nil { + return err } + executionState := execScheduler.GetState() + + // This is manually triggered after the Engine's Run() has completed, + // and things like a single Ctrl+C don't affect it. We use it to make + // sure that the progressbars finish updating with the latest execution + // state one last time, after the test run has finished. + progressCtx, progressCancel := context.WithCancel(globalCtx) + defer progressCancel() + initBar = execScheduler.GetInitProgressBar() + progressBarWG := &sync.WaitGroup{} + progressBarWG.Add(1) + go func() { + showProgress(progressCtx, conf, execScheduler, logger) + progressBarWG.Done() + }() + // Create an engine. - fprintf(stdout, "%s engine\r", initBar.String()) - engine, err := core.NewEngine(ex, conf.Options) + modifyAndPrintBar(initBar, pb.WithConstProgress(0, "Init engine")) + engine, err := core.NewEngine(execScheduler, conf.Options, logger) if err != nil { return err } + // TODO: refactor, the engine should have a copy of the config... // Configure the engine. if conf.NoThresholds.Valid { engine.NoThresholds = conf.NoThresholds.Bool @@ -209,241 +210,103 @@ a commandline interface for interacting with it.`, engine.SummaryExport = conf.SummaryExport.String != "" } + executionPlan := execScheduler.GetExecutionPlan() // Create a collector and assign it to the engine if requested. 
- fprintf(stdout, "%s collector\r", initBar.String()) + modifyAndPrintBar(initBar, pb.WithConstProgress(0, "Init metric outputs")) for _, out := range conf.Out { t, arg := parseCollector(out) - collector, err := newCollector(t, arg, src, conf) - if err != nil { - return err + collector, cerr := newCollector(t, arg, src, conf, executionPlan) + if cerr != nil { + return cerr } - if err := collector.Init(); err != nil { - return err + if cerr = collector.Init(); cerr != nil { + return cerr } engine.Collectors = append(engine.Collectors, collector) } - // Create an API server. - fprintf(stdout, "%s server\r", initBar.String()) - go func() { - if err := api.ListenAndServe(address, engine); err != nil { - logrus.WithError(err).Warn("Error from API server") - } - }() - - // Write the big banner. - { - out := "-" - link := "" - - for idx, collector := range engine.Collectors { - if out != "-" { - out = out + "; " + conf.Out[idx] - } else { - out = conf.Out[idx] - } - - if l := collector.Link(); l != "" { - link = link + " (" + l + ")" + // Spin up the REST API server, if not disabled. 
+ if address != "" { + modifyAndPrintBar(initBar, pb.WithConstProgress(0, "Init API server")) + go func() { + logger.Debugf("Starting the REST API server on %s", address) + if aerr := api.ListenAndServe(address, engine); aerr != nil { + // Only exit k6 if the user has explicitly set the REST API address + if cmd.Flags().Lookup("address").Changed { + logger.WithError(aerr).Error("Error from API server") + os.Exit(cannotStartRESTAPIErrorCode) + } else { + logger.WithError(aerr).Warn("Error from API server") + } } - } - - fprintf(stdout, " execution: %s\n", ui.ValueColor.Sprint("local")) - fprintf(stdout, " output: %s%s\n", ui.ValueColor.Sprint(out), ui.ExtraColor.Sprint(link)) - fprintf(stdout, " script: %s\n", ui.ValueColor.Sprint(filename)) - fprintf(stdout, "\n") - - duration := ui.GrayColor.Sprint("-") - iterations := ui.GrayColor.Sprint("-") - if conf.Duration.Valid { - duration = ui.ValueColor.Sprint(conf.Duration.Duration) - } - if conf.Iterations.Valid { - iterations = ui.ValueColor.Sprint(conf.Iterations.Int64) - } - vus := ui.ValueColor.Sprint(conf.VUs.Int64) - max := ui.ValueColor.Sprint(conf.VUsMax.Int64) - - leftWidth := ui.StrWidth(duration) - if l := ui.StrWidth(vus); l > leftWidth { - leftWidth = l - } - durationPad := strings.Repeat(" ", leftWidth-ui.StrWidth(duration)) - vusPad := strings.Repeat(" ", leftWidth-ui.StrWidth(vus)) - - fprintf(stdout, " duration: %s,%s iterations: %s\n", duration, durationPad, iterations) - fprintf(stdout, " vus: %s,%s max: %s\n", vus, vusPad, max) - fprintf(stdout, "\n") + }() } - // Run the engine with a cancellable context. - fprintf(stdout, "%s starting\r", initBar.String()) - ctx, cancel := context.WithCancel(context.Background()) - errC := make(chan error) - go func() { errC <- engine.Run(ctx) }() + printExecutionDescription( + "local", filename, "", conf, execScheduler.GetState().ExecutionTuple, + executionPlan, engine.Collectors) // Trap Interrupts, SIGINTs and SIGTERMs. 
sigC := make(chan os.Signal, 1) signal.Notify(sigC, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) defer signal.Stop(sigC) + go func() { + sig := <-sigC + logger.WithField("sig", sig).Debug("Stopping k6 in response to signal...") + lingerCancel() // stop the test run, metric processing is cancelled below + + // If we get a second signal, we immediately exit, so something like + // https://github.com/loadimpact/k6/issues/971 never happens again + sig = <-sigC + logger.WithField("sig", sig).Error("Aborting k6 in response to signal") + globalCancel() // not that it matters, given the following command... + os.Exit(externalAbortErrorCode) + }() - // If the user hasn't opted out: report usage. + // Initialize the engine + modifyAndPrintBar(initBar, pb.WithConstProgress(0, "Init VUs")) + engineRun, engineWait, err := engine.Init(globalCtx, runCtx) + if err != nil { + return getExitCodeFromEngine(err) + } + + // Init has passed successfully, so unless disabled, make sure we send a + // usage report after the context is done. if !conf.NoUsageReport.Bool { + reportDone := make(chan struct{}) go func() { - u := "https://reports.k6.io/" - mime := "application/json" - var endTSeconds float64 - if endT := engine.Executor.GetEndTime(); endT.Valid { - endTSeconds = time.Duration(endT.Duration).Seconds() - } - var stagesEndTSeconds float64 - if stagesEndT := lib.SumStages(engine.Executor.GetStages()); stagesEndT.Valid { - stagesEndTSeconds = time.Duration(stagesEndT.Duration).Seconds() - } - body, err := json.Marshal(map[string]interface{}{ - "k6_version": consts.Version, - "vus_max": engine.Executor.GetVUsMax(), - "iterations": engine.Executor.GetEndIterations(), - "duration": endTSeconds, - "st_duration": stagesEndTSeconds, - "goos": runtime.GOOS, - "goarch": runtime.GOARCH, - }) - if err != nil { - panic(err) // This should never happen!! 
- } - _, _ = http.Post(u, mime, bytes.NewBuffer(body)) + <-runCtx.Done() + _ = reportUsage(execScheduler) + close(reportDone) }() - } - - // Prepare a progress bar. - progress := ui.ProgressBar{ - Width: 60, - Left: func() string { - if engine.Executor.IsPaused() { - return " paused" - } else if engine.Executor.IsRunning() { - return " running" - } else { - return " done" - } - }, - Right: func() string { - if endIt := engine.Executor.GetEndIterations(); endIt.Valid { - return fmt.Sprintf("%d / %d", engine.Executor.GetIterations(), endIt.Int64) - } - precision := 100 * time.Millisecond - atT := engine.Executor.GetTime() - stagesEndT := lib.SumStages(engine.Executor.GetStages()) - endT := engine.Executor.GetEndTime() - if !endT.Valid || (stagesEndT.Valid && endT.Duration > stagesEndT.Duration) { - endT = stagesEndT + defer func() { + select { + case <-reportDone: + case <-time.After(3 * time.Second): } - if endT.Valid { - return fmt.Sprintf("%s / %s", - (atT/precision)*precision, - (time.Duration(endT.Duration)/precision)*precision, - ) - } - return ((atT / precision) * precision).String() - }, + }() } - // Ticker for progress bar updates. Less frequent updates for non-TTYs, none if quiet. 
- updateFreq := 50 * time.Millisecond - if !stdoutTTY { - updateFreq = 1 * time.Second + // Start the test run + modifyAndPrintBar(initBar, pb.WithConstProgress(0, "Start test")) + if err := engineRun(); err != nil { + return getExitCodeFromEngine(err) } - ticker := time.NewTicker(updateFreq) - if quiet || conf.HTTPDebug.Valid && conf.HTTPDebug.String != "" { - ticker.Stop() - } - mainLoop: - for { - select { - case <-ticker.C: - if quiet || !stdoutTTY { - l := logrus.WithFields(logrus.Fields{ - "t": engine.Executor.GetTime(), - "i": engine.Executor.GetIterations(), - }) - fn := l.Info - if quiet { - fn = l.Debug - } - if engine.Executor.IsPaused() { - fn("Paused") - } else { - fn("Running") - } - break - } - - var prog float64 - if endIt := engine.Executor.GetEndIterations(); endIt.Valid { - prog = float64(engine.Executor.GetIterations()) / float64(endIt.Int64) - } else { - stagesEndT := lib.SumStages(engine.Executor.GetStages()) - endT := engine.Executor.GetEndTime() - if !endT.Valid || (stagesEndT.Valid && endT.Duration > stagesEndT.Duration) { - endT = stagesEndT - } - if endT.Valid { - prog = float64(engine.Executor.GetTime()) / float64(endT.Duration) - } - } - progress.Progress = prog - fprintf(stdout, "%s\x1b[0K\r", progress.String()) - case err := <-errC: - cancel() - if err == nil { - logrus.Debug("Engine terminated cleanly") - break mainLoop - } + runCancel() + logger.Debug("Engine run terminated cleanly") - switch e := errors.Cause(err).(type) { - case lib.TimeoutError: - switch e.Place() { - case "setup": - return ExitCode{error: err, Code: setupTimeoutErrorCode, Hint: e.Hint()} - case "teardown": - return ExitCode{error: err, Code: teardownTimeoutErrorCode, Hint: e.Hint()} - default: - return ExitCode{error: err, Code: genericTimeoutErrorCode} - } - default: - //nolint:golint - return ExitCode{error: errors.New("Engine error"), Code: genericEngineErrorCode, Hint: err.Error()} - } - case sig := <-sigC: - logrus.WithField("sig", sig).Debug("Exiting in 
response to signal") - cancel() - } - } - if quiet || !stdoutTTY { - e := logrus.WithFields(logrus.Fields{ - "t": engine.Executor.GetTime(), - "i": engine.Executor.GetIterations(), - }) - fn := e.Info - if quiet { - fn = e.Debug - } - fn("Test finished") - } else { - progress.Progress = 1 - fprintf(stdout, "%s\x1b[0K\n", progress.String()) - } + progressCancel() + progressBarWG.Wait() // Warn if no iterations could be completed. - if engine.Executor.GetIterations() == 0 { - logrus.Warn("No data generated, because no script iterations finished, consider making the test duration longer") + if executionState.GetFullIterationCount() == 0 { + logger.Warn("No script iterations finished, consider making the test duration longer") } data := ui.SummaryData{ Metrics: engine.Metrics, - RootGroup: engine.Executor.GetRunner().GetDefaultGroup(), - Time: engine.Executor.GetTime(), + RootGroup: engine.ExecutionScheduler.GetRunner().GetDefaultGroup(), + Time: executionState.GetCurrentTestRunDuration(), TimeUnit: conf.Options.SummaryTimeUnit.String, } // Print the end-of-test summary. 
@@ -474,9 +337,17 @@ a commandline interface for interacting with it.`, } if conf.Linger.Bool { - logrus.Info("Linger set; waiting for Ctrl+C...") - <-sigC + select { + case <-lingerCtx.Done(): + // do nothing, we were interrupted by Ctrl+C already + default: + logger.Info("Linger set; waiting for Ctrl+C...") + <-lingerCtx.Done() + } } + globalCancel() // signal the Engine that it should wind down + logger.Debug("Waiting for engine processes to finish...") + engineWait() if engine.IsTainted() { return ExitCode{error: errors.New("some thresholds have failed"), Code: thresholdHaveFailedErrorCode} @@ -485,6 +356,54 @@ a commandline interface for interacting with it.`, }, } +func getExitCodeFromEngine(err error) ExitCode { + switch e := errors.Cause(err).(type) { + case lib.TimeoutError: + switch e.Place() { + case consts.SetupFn: + return ExitCode{error: err, Code: setupTimeoutErrorCode, Hint: e.Hint()} + case consts.TeardownFn: + return ExitCode{error: err, Code: teardownTimeoutErrorCode, Hint: e.Hint()} + default: + return ExitCode{error: err, Code: genericTimeoutErrorCode} + } + default: + //nolint:golint + return ExitCode{error: errors.New("Engine error"), Code: genericEngineErrorCode, Hint: err.Error()} + } +} + +func reportUsage(execScheduler *local.ExecutionScheduler) error { + execState := execScheduler.GetState() + executorConfigs := execScheduler.GetExecutorConfigs() + + executors := make(map[string]int) + for _, ec := range executorConfigs { + executors[ec.GetType()]++ + } + + body, err := json.Marshal(map[string]interface{}{ + "k6_version": consts.Version, + "executors": executors, + "vus_max": execState.GetInitializedVUsCount(), + "iterations": execState.GetFullIterationCount(), + "duration": execState.GetCurrentTestRunDuration().String(), + "goos": runtime.GOOS, + "goarch": runtime.GOARCH, + }) + if err != nil { + return err + } + res, err := http.Post("https://reports.k6.io/", "application/json", bytes.NewBuffer(body)) + defer func() { + if err == nil { 
+ _ = res.Body.Close() + } + }() + + return err +} + func runCmdFlagSet() *pflag.FlagSet { flags := pflag.NewFlagSet("", pflag.ContinueOnError) flags.SortFlags = false @@ -492,7 +411,7 @@ func runCmdFlagSet() *pflag.FlagSet { flags.AddFlagSet(runtimeOptionFlagSet(true)) flags.AddFlagSet(configFlagSet()) - //TODO: Figure out a better way to handle the CLI flags: + // TODO: Figure out a better way to handle the CLI flags: // - the default values are specified in this way so we don't overwrire whatever // was specified via the environment variables // - but we need to manually specify the DefValue, since that's the default value @@ -501,11 +420,6 @@ func runCmdFlagSet() *pflag.FlagSet { // - and finally, global variables are not very testable... :/ flags.StringVarP(&runType, "type", "t", runType, "override file `type`, \"js\" or \"archive\"") flags.Lookup("type").DefValue = "" - flags.BoolVar(&runNoSetup, "no-setup", runNoSetup, "don't run setup()") - falseStr := "false" // avoiding goconst warnings... - flags.Lookup("no-setup").DefValue = falseStr - flags.BoolVar(&runNoTeardown, "no-teardown", runNoTeardown, "don't run teardown()") - flags.Lookup("no-teardown").DefValue = falseStr return flags } diff --git a/cmd/runtime_options.go b/cmd/runtime_options.go index a594f581aa0..edc1680a1a2 100644 --- a/cmd/runtime_options.go +++ b/cmd/runtime_options.go @@ -21,16 +21,21 @@ package cmd import ( - "os" "regexp" + "strconv" "strings" - "github.com/loadimpact/k6/lib" "github.com/pkg/errors" "github.com/spf13/pflag" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" ) +// TODO: move this whole file out of the cmd package? maybe when fixing +// https://github.com/loadimpact/k6/issues/883, since this code is fairly +// self-contained and easily testable now, without any global dependencies... 
+ var userEnvVarName = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`) func parseEnvKeyValue(kv string) (string, string) { @@ -40,9 +45,9 @@ func parseEnvKeyValue(kv string) (string, string) { return kv, "" } -func collectEnv() map[string]string { - env := make(map[string]string) - for _, kv := range os.Environ() { +func buildEnvMap(environ []string) map[string]string { + env := make(map[string]string, len(environ)) + for _, kv := range environ { k, v := parseEnvKeyValue(kv) env[k] = v } @@ -63,16 +68,35 @@ extended: base + Babel with ES2015 preset + core.js v2, return flags } -func getRuntimeOptions(flags *pflag.FlagSet) (lib.RuntimeOptions, error) { +func getRuntimeOptions(flags *pflag.FlagSet, environment map[string]string) (lib.RuntimeOptions, error) { opts := lib.RuntimeOptions{ IncludeSystemEnvVars: getNullBool(flags, "include-system-env-vars"), CompatibilityMode: getNullString(flags, "compatibility-mode"), Env: make(map[string]string), } - // If enabled, gather the actual system environment variables - if opts.IncludeSystemEnvVars.Bool { - opts.Env = collectEnv() + if !opts.CompatibilityMode.Valid { // If not explicitly set via CLI flags, look for an environment variable + if envVar, ok := environment["K6_COMPATIBILITY_MODE"]; ok { + opts.CompatibilityMode = null.StringFrom(envVar) + } + } + if _, err := lib.ValidateCompatibilityMode(opts.CompatibilityMode.String); err != nil { + // some early validation + return opts, err + } + + if !opts.IncludeSystemEnvVars.Valid { // If not explicitly set via CLI flags, look for an environment variable + if envVar, ok := environment["K6_INCLUDE_SYSTEM_ENV_VARS"]; ok { + val, err := strconv.ParseBool(envVar) + if err != nil { + return opts, err + } + opts.IncludeSystemEnvVars = null.BoolFrom(val) + } + } + + if opts.IncludeSystemEnvVars.Bool { // If enabled, gather the actual system environment variables + opts.Env = environment } // Set/overwrite environment variables with custom user-supplied values @@ -80,7 +104,6 @@ 
func getRuntimeOptions(flags *pflag.FlagSet) (lib.RuntimeOptions, error) { if err != nil { return opts, err } - for _, kv := range envVars { k, v := parseEnvKeyValue(kv) // Allow only alphanumeric ASCII variable names for now @@ -90,11 +113,5 @@ func getRuntimeOptions(flags *pflag.FlagSet) (lib.RuntimeOptions, error) { opts.Env[k] = v } - // Fallback to env - compatMode := opts.Env["K6_COMPATIBILITY_MODE"] - if !opts.CompatibilityMode.Valid && compatMode != "" { - opts.CompatibilityMode = null.StringFrom(compatMode) - } - return opts, nil } diff --git a/cmd/runtime_options_test.go b/cmd/runtime_options_test.go index 2321f691cb6..28bcead9a97 100644 --- a/cmd/runtime_options_test.go +++ b/cmd/runtime_options_test.go @@ -24,246 +24,253 @@ import ( "bytes" "fmt" "net/url" - "os" - "runtime" - "strings" "testing" - "github.com/loadimpact/k6/lib" - "github.com/loadimpact/k6/loader" "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" -) + "gopkg.in/guregu/null.v3" -var envVars []string + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/loader" +) -func init() { - envVars = os.Environ() +type runtimeOptionsTestCase struct { + useSysEnv bool // Whether to include the system env vars by default (run) or not (cloud/archive/inspect) + expErr bool + cliFlags []string + systemEnv map[string]string + expEnv map[string]string + expCompatMode null.String } -type EnvVarTest struct { - name string - useSysEnv bool // Whether to include the system env vars by default (run) or not (cloud/archive/inspect) - systemEnv map[string]string - cliOpts []string - expErr bool - expEnv map[string]string -} +//nolint:gochecknoglobals +var ( + defaultCompatMode = null.NewString("extended", false) + baseCompatMode = null.NewString("base", true) + extendedCompatMode = null.NewString("extended", true) +) -var envVarTestCases = []EnvVarTest{ - { - "empty env", - true, - map[string]string{}, - []string{}, - false, - map[string]string{}, +var 
runtimeOptionsTestCases = map[string]runtimeOptionsTestCase{ //nolint:gochecknoglobals + "empty env": { + useSysEnv: true, + // everything else is empty + expCompatMode: defaultCompatMode, + }, + "disabled sys env by default": { + useSysEnv: false, + systemEnv: map[string]string{"test1": "val1"}, + expEnv: map[string]string{}, + expCompatMode: defaultCompatMode, + }, + "disabled sys env by default with ext compat mode": { + useSysEnv: false, + systemEnv: map[string]string{"test1": "val1", "K6_COMPATIBILITY_MODE": "extended"}, + expEnv: map[string]string{}, + expCompatMode: extendedCompatMode, }, - { - "disabled sys env by default", - false, - map[string]string{"test1": "val1"}, - []string{}, - false, - map[string]string{}, + "disabled sys env by cli 1": { + useSysEnv: true, + systemEnv: map[string]string{"test1": "val1", "K6_COMPATIBILITY_MODE": "base"}, + cliFlags: []string{"--include-system-env-vars=false"}, + expEnv: map[string]string{}, + expCompatMode: baseCompatMode, }, - { - "disabled sys env by cli 1", - true, - map[string]string{"test1": "val1"}, - []string{"--include-system-env-vars=false"}, - false, - map[string]string{}, + "disabled sys env by cli 2": { + useSysEnv: true, + systemEnv: map[string]string{"K6_INCLUDE_SYSTEM_ENV_VARS": "true", "K6_COMPATIBILITY_MODE": "extended"}, + cliFlags: []string{"--include-system-env-vars=0", "--compatibility-mode=base"}, + expEnv: map[string]string{}, + expCompatMode: baseCompatMode, }, - { - "disabled sys env by cli 2", - true, - map[string]string{"test1": "val1"}, - []string{"--include-system-env-vars=0"}, - false, - map[string]string{}, + "disabled sys env by env": { + useSysEnv: true, + systemEnv: map[string]string{"K6_INCLUDE_SYSTEM_ENV_VARS": "false", "K6_COMPATIBILITY_MODE": "extended"}, + expEnv: map[string]string{}, + expCompatMode: extendedCompatMode, }, - { - "enabled sys env by default", - true, - map[string]string{"test1": "val1"}, - []string{}, - false, - map[string]string{"test1": "val1"}, + "enabled 
sys env by env": { + useSysEnv: false, + systemEnv: map[string]string{"K6_INCLUDE_SYSTEM_ENV_VARS": "true", "K6_COMPATIBILITY_MODE": "extended"}, + expEnv: map[string]string{"K6_INCLUDE_SYSTEM_ENV_VARS": "true", "K6_COMPATIBILITY_MODE": "extended"}, + expCompatMode: extendedCompatMode, }, - { - "enabled sys env by cli 1", - false, - map[string]string{"test1": "val1"}, - []string{"--include-system-env-vars"}, - false, - map[string]string{"test1": "val1"}, + "enabled sys env by default": { + useSysEnv: true, + systemEnv: map[string]string{"test1": "val1"}, + cliFlags: []string{}, + expEnv: map[string]string{"test1": "val1"}, + expCompatMode: defaultCompatMode, }, - { - "enabled sys env by cli 2", - false, - map[string]string{"test1": "val1"}, - []string{"--include-system-env-vars=true"}, - false, - map[string]string{"test1": "val1"}, + "enabled sys env by cli 1": { + useSysEnv: false, + systemEnv: map[string]string{"test1": "val1"}, + cliFlags: []string{"--include-system-env-vars"}, + expEnv: map[string]string{"test1": "val1"}, + expCompatMode: defaultCompatMode, }, - { - "run only system env", - true, - map[string]string{"test1": "val1"}, - []string{}, - false, - map[string]string{"test1": "val1"}, + "enabled sys env by cli 2": { + useSysEnv: false, + systemEnv: map[string]string{"test1": "val1"}, + cliFlags: []string{"--include-system-env-vars=true"}, + expEnv: map[string]string{"test1": "val1"}, + expCompatMode: defaultCompatMode, }, - { - "mixed system and cli env", - true, - map[string]string{"test1": "val1", "test2": ""}, - []string{"--env", "test3=val3", "-e", "test4", "-e", "test5="}, - false, - map[string]string{"test1": "val1", "test2": "", "test3": "val3", "test4": "", "test5": ""}, + "run only system env": { + useSysEnv: true, + systemEnv: map[string]string{"test1": "val1"}, + cliFlags: []string{}, + expEnv: map[string]string{"test1": "val1"}, + expCompatMode: defaultCompatMode, }, - { - "mixed system and cli env 2", - false, - map[string]string{"test1": 
"val1", "test2": ""}, - []string{"--env", "test3=val3", "-e", "test4", "-e", "test5=", "--include-system-env-vars=1"}, - false, - map[string]string{"test1": "val1", "test2": "", "test3": "val3", "test4": "", "test5": ""}, + "mixed system and cli env": { + useSysEnv: true, + systemEnv: map[string]string{"test1": "val1", "test2": ""}, + cliFlags: []string{"--env", "test3=val3", "-e", "test4", "-e", "test5="}, + expEnv: map[string]string{"test1": "val1", "test2": "", "test3": "val3", "test4": "", "test5": ""}, + expCompatMode: defaultCompatMode, }, - { - "disabled system env with cli params", - false, - map[string]string{"test1": "val1"}, - []string{"-e", "test2=overwriten", "-e", "test2=val2"}, - false, - map[string]string{"test2": "val2"}, + "mixed system and cli env 2": { + useSysEnv: false, + systemEnv: map[string]string{"test1": "val1", "test2": ""}, + cliFlags: []string{"--env", "test3=val3", "-e", "test4", "-e", "test5=", "--include-system-env-vars=1"}, + expEnv: map[string]string{"test1": "val1", "test2": "", "test3": "val3", "test4": "", "test5": ""}, + expCompatMode: defaultCompatMode, }, - { - "overwriting system env with cli param", - true, - map[string]string{"test1": "val1sys"}, - []string{"--env", "test1=val1cli"}, - false, - map[string]string{"test1": "val1cli"}, + "disabled system env with cli params": { + useSysEnv: false, + systemEnv: map[string]string{"test1": "val1"}, + cliFlags: []string{"-e", "test2=overwriten", "-e", "test2=val2"}, + expEnv: map[string]string{"test2": "val2"}, + expCompatMode: defaultCompatMode, }, - { - "error invalid cli var name 1", - true, - map[string]string{}, - []string{"--env", "test a=error"}, - true, - map[string]string{}, + "overwriting system env with cli param": { + useSysEnv: true, + systemEnv: map[string]string{"test1": "val1sys"}, + cliFlags: []string{"--env", "test1=val1cli"}, + expEnv: map[string]string{"test1": "val1cli"}, + expCompatMode: defaultCompatMode, }, - { - "error invalid cli var name 2", - true, - 
map[string]string{}, - []string{"--env", "1var=error"}, - true, - map[string]string{}, + "error wrong compat mode env var value": { + systemEnv: map[string]string{"K6_COMPATIBILITY_MODE": "asdf"}, + expErr: true, }, - { - "error invalid cli var name 3", - true, - map[string]string{}, - []string{"--env", "уникод=unicode-disabled"}, - true, - map[string]string{}, + "error wrong compat mode cli flag value": { + cliFlags: []string{"--compatibility-mode", "whatever"}, + expErr: true, }, - { - "valid env vars with spaces", - true, - map[string]string{"test1": "value 1"}, - []string{"--env", "test2=value 2"}, - false, - map[string]string{"test1": "value 1", "test2": "value 2"}, + "error invalid cli var name 1": { + useSysEnv: true, + systemEnv: map[string]string{}, + cliFlags: []string{"--env", "test a=error"}, + expErr: true, + expEnv: map[string]string{}, + expCompatMode: defaultCompatMode, }, - { - "valid env vars with special chars", - true, - map[string]string{"test1": "value 1"}, - []string{"--env", "test2=value,2", "-e", `test3= , ,,, value, ,, 2!'@#,"`}, - false, - map[string]string{"test1": "value 1", "test2": "value,2", "test3": ` , ,,, value, ,, 2!'@#,"`}, + "error invalid cli var name 2": { + useSysEnv: true, + systemEnv: map[string]string{}, + cliFlags: []string{"--env", "1var=error"}, + expErr: true, + expEnv: map[string]string{}, + expCompatMode: defaultCompatMode, + }, + "error invalid cli var name 3": { + useSysEnv: true, + systemEnv: map[string]string{}, + cliFlags: []string{"--env", "уникод=unicode-disabled"}, + expErr: true, + expEnv: map[string]string{}, + expCompatMode: defaultCompatMode, + }, + "valid env vars with spaces": { + useSysEnv: true, + systemEnv: map[string]string{"test1": "value 1"}, + cliFlags: []string{"--env", "test2=value 2"}, + expEnv: map[string]string{"test1": "value 1", "test2": "value 2"}, + expCompatMode: defaultCompatMode, + }, + "valid env vars with special chars": { + useSysEnv: true, + systemEnv: map[string]string{"test1": 
"value 1"}, + cliFlags: []string{"--env", "test2=value,2", "-e", `test3= , ,,, value, ,, 2!'@#,"`}, + expEnv: map[string]string{"test1": "value 1", "test2": "value,2", "test3": ` , ,,, value, ,, 2!'@#,"`}, + expCompatMode: defaultCompatMode, }, } -func TestEnvVars(t *testing.T) { - for _, tc := range envVarTestCases { - t.Run(fmt.Sprintf("EnvVar test '%s'", tc.name), func(t *testing.T) { - os.Clearenv() - for key, val := range tc.systemEnv { - require.NoError(t, os.Setenv(key, val)) - } - flags := runtimeOptionFlagSet(tc.useSysEnv) - require.NoError(t, flags.Parse(tc.cliOpts)) +func testRuntimeOptionsCase(t *testing.T, tc runtimeOptionsTestCase) { + flags := runtimeOptionFlagSet(tc.useSysEnv) + require.NoError(t, flags.Parse(tc.cliFlags)) + + rtOpts, err := getRuntimeOptions(flags, tc.systemEnv) + if tc.expErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.EqualValues(t, tc.expEnv, rtOpts.Env) + assert.Equal(t, tc.expCompatMode, rtOpts.CompatibilityMode) - rtOpts, err := getRuntimeOptions(flags) - if tc.expErr { - require.Error(t, err) - return - } - require.NoError(t, err) - require.EqualValues(t, tc.expEnv, rtOpts.Env) + compatMode, err := lib.ValidateCompatibilityMode(rtOpts.CompatibilityMode.String) + require.NoError(t, err) - // Clear the env again so real system values don't accidentally pollute the end-to-end test - os.Clearenv() + jsCode := new(bytes.Buffer) + if compatMode == lib.CompatibilityModeExtended { + fmt.Fprint(jsCode, "export default function() {") + } else { + fmt.Fprint(jsCode, "module.exports.default = function() {") + } - jsCode := "export default function() {\n" - for key, val := range tc.expEnv { - jsCode += fmt.Sprintf( - "if (__ENV.%s !== `%s`) { throw new Error('Invalid %s: ' + __ENV.%s); }\n", - key, val, key, key, - ) - } - jsCode += "}" + for key, val := range tc.expEnv { + fmt.Fprintf(jsCode, + "if (__ENV.%s !== `%s`) { throw new Error('Invalid %s: ' + __ENV.%s); }", + key, val, key, key, + ) + } + 
fmt.Fprint(jsCode, "}") - // windows requires the environment variables to be loaded to gerenate the rand source - if runtime.GOOS == "windows" { - for _, e := range envVars { - parts := strings.Split(e, "=") - os.Setenv(parts[0], parts[1]) - } - } + fs := afero.NewMemMapFs() + require.NoError(t, afero.WriteFile(fs, "/script.js", jsCode.Bytes(), 0644)) + runner, err := newRunner( + &loader.SourceData{Data: jsCode.Bytes(), URL: &url.URL{Path: "/script.js", Scheme: "file"}}, + typeJS, + map[string]afero.Fs{"file": fs}, + rtOpts, + ) + require.NoError(t, err) - fs := afero.NewMemMapFs() - require.NoError(t, afero.WriteFile(fs, "/script.js", []byte(jsCode), 0644)) - runner, err := newRunner( - &loader.SourceData{ - Data: []byte(jsCode), - URL: &url.URL{Path: "/script.js", Scheme: "file"}, - }, - typeJS, - map[string]afero.Fs{"file": fs}, - rtOpts, - ) - require.NoError(t, err) + archive := runner.MakeArchive() + archiveBuf := &bytes.Buffer{} + require.NoError(t, archive.Write(archiveBuf)) - archive := runner.MakeArchive() - archiveBuf := &bytes.Buffer{} - assert.NoError(t, archive.Write(archiveBuf)) + getRunnerErr := func(rtOpts lib.RuntimeOptions) (lib.Runner, error) { + return newRunner( + &loader.SourceData{ + Data: archiveBuf.Bytes(), + URL: &url.URL{Path: "/script.js"}, + }, + typeArchive, + nil, + rtOpts, + ) + } - getRunnerErr := func(rtOpts lib.RuntimeOptions) (lib.Runner, error) { - return newRunner( - &loader.SourceData{ - Data: archiveBuf.Bytes(), - URL: &url.URL{Path: "/script.js"}, - }, - typeArchive, - nil, - rtOpts, - ) - } + _, err = getRunnerErr(lib.RuntimeOptions{}) + require.NoError(t, err) + for key, val := range tc.expEnv { + r, err := getRunnerErr(lib.RuntimeOptions{Env: map[string]string{key: "almost " + val}}) + assert.NoError(t, err) + assert.Equal(t, r.MakeArchive().Env[key], "almost "+val) + } +} - _, err = getRunnerErr(lib.RuntimeOptions{}) - require.NoError(t, err) - for key, val := range tc.expEnv { - r, err := 
getRunnerErr(lib.RuntimeOptions{Env: map[string]string{key: "almost " + val}}) - assert.NoError(t, err) - assert.Equal(t, r.MakeArchive().Env[key], "almost "+val) - } +func TestRuntimeOptions(t *testing.T) { + for name, tc := range runtimeOptionsTestCases { + tc := tc + t.Run(fmt.Sprintf("RuntimeOptions test '%s'", name), func(t *testing.T) { + t.Parallel() + testRuntimeOptionsCase(t, tc) }) } } diff --git a/cmd/scale.go b/cmd/scale.go index 23dbc91b0e5..0423bbadcae 100644 --- a/cmd/scale.go +++ b/cmd/scale.go @@ -23,11 +23,12 @@ package cmd import ( "context" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/loadimpact/k6/api/v1" "github.com/loadimpact/k6/api/v1/client" "github.com/loadimpact/k6/ui" - "github.com/pkg/errors" - "github.com/spf13/cobra" ) // scaleCmd represents the scale command diff --git a/cmd/stats.go b/cmd/stats.go index 2a77aaaa337..54889583448 100644 --- a/cmd/stats.go +++ b/cmd/stats.go @@ -23,9 +23,10 @@ package cmd import ( "context" + "github.com/spf13/cobra" + "github.com/loadimpact/k6/api/v1/client" "github.com/loadimpact/k6/ui" - "github.com/spf13/cobra" ) // statsCmd represents the stats command diff --git a/cmd/status.go b/cmd/status.go index 39910bfd106..e0fa62c0001 100644 --- a/cmd/status.go +++ b/cmd/status.go @@ -23,9 +23,10 @@ package cmd import ( "context" + "github.com/spf13/cobra" + "github.com/loadimpact/k6/api/v1/client" "github.com/loadimpact/k6/ui" - "github.com/spf13/cobra" ) // statusCmd represents the status command diff --git a/cmd/ui.go b/cmd/ui.go new file mode 100644 index 00000000000..70b272a16a4 --- /dev/null +++ b/cmd/ui.go @@ -0,0 +1,346 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package cmd + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/signal" + "strings" + "sync" + "time" + "unicode/utf8" + + "github.com/sirupsen/logrus" + "golang.org/x/crypto/ssh/terminal" + + "github.com/loadimpact/k6/core/local" + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/ui" + "github.com/loadimpact/k6/ui/pb" +) + +const ( + // Max length of left-side progress bar text before trimming is forced + maxLeftLength = 30 + // Amount of padding in chars between rendered progress + // bar text and right-side terminal window edge. + termPadding = 1 +) + +// A writer that syncs writes with a mutex and, if the output is a TTY, clears before newlines. +type consoleWriter struct { + Writer io.Writer + IsTTY bool + Mutex *sync.Mutex + + // Used for flicker-free persistent objects like the progressbars + PersistentText func() +} + +func (w *consoleWriter) Write(p []byte) (n int, err error) { + origLen := len(p) + if w.IsTTY { + // Add a TTY code to erase till the end of line with each new line + // TODO: check how cross-platform this is... 
+ p = bytes.Replace(p, []byte{'\n'}, []byte{'\x1b', '[', '0', 'K', '\n'}, -1) + } + + w.Mutex.Lock() + n, err = w.Writer.Write(p) + if w.PersistentText != nil { + w.PersistentText() + } + w.Mutex.Unlock() + + if err != nil && n < origLen { + return n, err + } + return origLen, err +} + +func printBar(bar *pb.ProgressBar) { + end := "\n" + if stdout.IsTTY { + // If we're in a TTY, instead of printing the bar and going to the next + // line, erase everything till the end of the line and return to the + // start, so that the next print will overwrite the same line. + // + // TODO: check for cross platform support + end = "\x1b[0K\r" + } + rendered := bar.Render(0, 0) + // Only output the left and middle part of the progress bar + fprintf(stdout, "%s%s", rendered.String(), end) +} + +func modifyAndPrintBar(bar *pb.ProgressBar, options ...pb.ProgressBarOption) { + bar.Modify(options...) + printBar(bar) +} + +// Print execution description for both cloud and local execution. +// TODO: Clean this up as part of #1499 or #1427 +func printExecutionDescription( + execution, filename, output string, conf Config, et *lib.ExecutionTuple, + execPlan []lib.ExecutionStep, collectors []lib.Collector, +) { + fprintf(stdout, " execution: %s\n", ui.ValueColor.Sprint(execution)) + fprintf(stdout, " script: %s\n", ui.ValueColor.Sprint(filename)) + + if execution == "local" { + out := "-" + link := "" + + for idx, collector := range collectors { + if out != "-" { + out = out + "; " + conf.Out[idx] + } else { + out = conf.Out[idx] + } + + if l := collector.Link(); l != "" { + link = link + " (" + l + ")" + } + } + fprintf(stdout, " output: %s%s\n", ui.ValueColor.Sprint(out), ui.ExtraColor.Sprint(link)) + } else { + fprintf(stdout, " output: %s\n", ui.ValueColor.Sprint(output)) + } + fprintf(stdout, "\n") + + maxDuration, _ := lib.GetEndOffset(execPlan) + executorConfigs := conf.Scenarios.GetSortedConfigs() + + fprintf(stdout, " scenarios: %s\n", ui.ValueColor.Sprintf( + "(%.2f%%) %d 
executors, %d max VUs, %s max duration (incl. graceful stop):", + conf.ExecutionSegment.FloatLength()*100, len(executorConfigs), + lib.GetMaxPossibleVUs(execPlan), maxDuration.Round(100*time.Millisecond)), + ) + for _, ec := range executorConfigs { + fprintf(stdout, " * %s: %s\n", + ec.GetName(), ec.GetDescription(et)) + } + fprintf(stdout, "\n") +} + +//nolint: funlen +func renderMultipleBars( + isTTY, goBack bool, maxLeft, termWidth, widthDelta int, pbs []*pb.ProgressBar, +) (string, int) { + lineEnd := "\n" + if isTTY { + //TODO: check for cross platform support + lineEnd = "\x1b[K\n" // erase till end of line + } + + var ( + // Amount of times line lengths exceed termWidth. + // Needed to factor into the amount of lines to jump + // back with [A and avoid scrollback issues. + lineBreaks int + longestLine int + // Maximum length of each right side column except last, + // used to calculate the padding between columns. + maxRColumnLen = make([]int, 2) + pbsCount = len(pbs) + rendered = make([]pb.ProgressBarRender, pbsCount) + result = make([]string, pbsCount+2) + ) + + result[0] = lineEnd // start with an empty line + + // First pass to render all progressbars and get the maximum + // lengths of right-side columns. + for i, pb := range pbs { + rend := pb.Render(maxLeft, widthDelta) + for i := range rend.Right { + // Skip last column, since there's nothing to align after it (yet?). 
+ if i == len(rend.Right)-1 { + break + } + if len(rend.Right[i]) > maxRColumnLen[i] { + maxRColumnLen[i] = len(rend.Right[i]) + } + } + rendered[i] = rend + } + + // Second pass to render final output, applying padding where needed + for i := range rendered { + rend := rendered[i] + if rend.Hijack != "" { + result[i+1] = rend.Hijack + lineEnd + runeCount := utf8.RuneCountInString(rend.Hijack) + lineBreaks += (runeCount - termPadding) / termWidth + continue + } + var leftText, rightText string + leftPadFmt := fmt.Sprintf("%%-%ds", maxLeft) + leftText = fmt.Sprintf(leftPadFmt, rend.Left) + for i := range rend.Right { + rpad := 0 + if len(maxRColumnLen) > i { + rpad = maxRColumnLen[i] + } + rightPadFmt := fmt.Sprintf(" %%-%ds", rpad+1) + rightText += fmt.Sprintf(rightPadFmt, rend.Right[i]) + } + // Get visible line length, without ANSI escape sequences (color) + status := fmt.Sprintf(" %s ", rend.Status()) + line := leftText + status + rend.Progress() + rightText + lineRuneCount := utf8.RuneCountInString(line) + if lineRuneCount > longestLine { + longestLine = lineRuneCount + } + lineBreaks += (lineRuneCount - termPadding) / termWidth + if !noColor { + rend.Color = true + status = fmt.Sprintf(" %s ", rend.Status()) + line = fmt.Sprintf(leftPadFmt+"%s%s%s", + rend.Left, status, rend.Progress(), rightText) + } + result[i+1] = line + lineEnd + } + + if isTTY && goBack { + // Clear screen and go back to the beginning + //TODO: check for cross platform support + result[pbsCount+1] = fmt.Sprintf("\r\x1b[J\x1b[%dA", pbsCount+lineBreaks+1) + } else { + result[pbsCount+1] = lineEnd + } + + return strings.Join(result, ""), longestLine +} + +//TODO: show other information here? +//TODO: add a no-progress option that will disable these +//TODO: don't use global variables... 
+// nolint:funlen +func showProgress( + ctx context.Context, conf Config, + execScheduler *local.ExecutionScheduler, logger *logrus.Logger, +) { + if quiet || conf.HTTPDebug.Valid && conf.HTTPDebug.String != "" { + return + } + + pbs := []*pb.ProgressBar{execScheduler.GetInitProgressBar()} + for _, s := range execScheduler.GetExecutors() { + pbs = append(pbs, s.GetProgress()) + } + + termWidth, _, err := terminal.GetSize(int(os.Stdout.Fd())) + if err != nil && stdoutTTY { + logger.WithError(err).Warn("error getting terminal size") + termWidth = 80 // TODO: something safer, return error? + } + + // Get the longest left side string length, to align progress bars + // horizontally and trim excess text. + var leftLen int64 + for _, pb := range pbs { + l := pb.Left() + leftLen = lib.Max(int64(len(l)), leftLen) + } + // Limit to maximum left text length + maxLeft := int(lib.Min(leftLen, maxLeftLength)) + + var progressBarsLastRender []byte + + printProgressBars := func() { + _, _ = stdout.Writer.Write(progressBarsLastRender) + } + + var widthDelta int + // Default to responsive progress bars when in an interactive terminal + renderProgressBars := func(goBack bool) { + barText, longestLine := renderMultipleBars(stdoutTTY, goBack, maxLeft, termWidth, widthDelta, pbs) + widthDelta = termWidth - longestLine - termPadding + progressBarsLastRender = []byte(barText) + } + + // Otherwise fallback to fixed compact progress bars + if !stdoutTTY { + widthDelta = -pb.DefaultWidth + renderProgressBars = func(goBack bool) { + barText, _ := renderMultipleBars(stdoutTTY, goBack, maxLeft, termWidth, widthDelta, pbs) + progressBarsLastRender = []byte(barText) + } + } + + //TODO: make configurable? 
+ updateFreq := 1 * time.Second + //TODO: remove !noColor after we fix how we handle colors (see the related + //description in the TODO message in cmd/root.go) + if stdoutTTY && !noColor { + updateFreq = 100 * time.Millisecond + outMutex.Lock() + stdout.PersistentText = printProgressBars + stderr.PersistentText = printProgressBars + outMutex.Unlock() + defer func() { + outMutex.Lock() + stdout.PersistentText = nil + stderr.PersistentText = nil + outMutex.Unlock() + }() + } + + var ( + fd = int(os.Stdout.Fd()) + ticker = time.NewTicker(updateFreq) + ) + + var winch chan os.Signal + if sig := getWinchSignal(); sig != nil { + winch = make(chan os.Signal, 1) + signal.Notify(winch, sig) + } + + ctxDone := ctx.Done() + for { + select { + case <-ctxDone: + renderProgressBars(false) + outMutex.Lock() + printProgressBars() + outMutex.Unlock() + return + case <-winch: + // More responsive progress bar resizing on platforms with SIGWINCH (*nix) + termWidth, _, _ = terminal.GetSize(fd) + case <-ticker.C: + // Default ticker-based progress bar resizing + if winch == nil { + termWidth, _, _ = terminal.GetSize(fd) + } + } + renderProgressBars(true) + outMutex.Lock() + printProgressBars() + outMutex.Unlock() + } +} diff --git a/cmd/ui_test.go b/cmd/ui_test.go new file mode 100644 index 00000000000..c0971547074 --- /dev/null +++ b/cmd/ui_test.go @@ -0,0 +1,99 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package cmd + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/loadimpact/k6/ui/pb" +) + +// Return progressbars with different content lengths, to test for +// padding. +func createTestProgressBars(num, padding, colIdx int) []*pb.ProgressBar { + pbs := make([]*pb.ProgressBar, num) + for i := 0; i < num; i++ { + left := fmt.Sprintf("left %d", i) + rightCol1 := fmt.Sprintf("right %d", i) + progress := 0.0 + status := pb.Running + if i == colIdx { + pad := strings.Repeat("+", padding) + left += pad + rightCol1 += pad + progress = 1.0 + status = pb.Done + } + pbs[i] = pb.New( + pb.WithLeft(func() string { return left }), + pb.WithStatus(status), + pb.WithProgress(func() (float64, []string) { + return progress, []string{rightCol1, "000"} + }), + ) + } + return pbs +} + +func TestRenderMultipleBars(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + padding int + widthDelta int + expOut string + expLongLine int + }{ + {"pad0", 0, 0, ` +left 0 [--------------------------------------] right 0 000 +left 1 ✓ [======================================] right 1 000 +left 2 [--------------------------------------] right 2 000 + +`, 62}, + {"pad2", 2, 0, ` +left 0 [--------------------------------------] right 0 000 +left 1++ ✓ [======================================] right 1++ 000 +left 2 [--------------------------------------] right 2 000 + +`, 66}, + {"pad0compact", 0, -50, ` +left 0 [ 0% ] right 0 000 +left 1 ✓ [ 100% ] right 1 000 +left 2 [ 0% ] right 2 000 + +`, 30}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + pbs := createTestProgressBars(3, tc.padding, 1) + out, longestLine := renderMultipleBars(false, false, 6+tc.padding, 80, tc.widthDelta, pbs) + assert.Equal(t, tc.expOut, out) + assert.Equal(t, tc.expLongLine, 
longestLine) + }) + } +} diff --git a/cmd/ui_unix.go b/cmd/ui_unix.go new file mode 100644 index 00000000000..c12d4ed1ad3 --- /dev/null +++ b/cmd/ui_unix.go @@ -0,0 +1,32 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd + +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2020 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package cmd + +import ( + "os" + "syscall" +) + +func getWinchSignal() os.Signal { + return syscall.SIGWINCH +} diff --git a/cmd/configdir_go113.go b/cmd/ui_windows.go similarity index 85% rename from cmd/configdir_go113.go rename to cmd/ui_windows.go index 66281fcddf5..62cc92a6ea6 100644 --- a/cmd/configdir_go113.go +++ b/cmd/ui_windows.go @@ -1,9 +1,9 @@ -// +build go1.13 +// +build windows /* * * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact + * Copyright (C) 2020 Load Impact * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as @@ -22,8 +22,10 @@ package cmd -import "os" +import ( + "os" +) -func configDir() (string, error) { - return os.UserConfigDir() +func getWinchSignal() os.Signal { + return nil } diff --git a/cmd/version.go b/cmd/version.go index 811c44915bf..12e7d1c3ef2 100644 --- a/cmd/version.go +++ b/cmd/version.go @@ -23,8 +23,9 @@ package cmd import ( "fmt" - 
"github.com/loadimpact/k6/lib/consts" "github.com/spf13/cobra" + + "github.com/loadimpact/k6/lib/consts" ) // versionCmd represents the version command. diff --git a/converter/har/converter.go b/converter/har/converter.go index 14ac7d7f49d..df6b2e1cdff 100644 --- a/converter/har/converter.go +++ b/converter/har/converter.go @@ -30,9 +30,10 @@ import ( "sort" "strings" - "github.com/loadimpact/k6/lib" "github.com/pkg/errors" "github.com/tidwall/pretty" + + "github.com/loadimpact/k6/lib" ) // fprint panics when where's an error writing to the supplied io.Writer diff --git a/converter/har/converter_test.go b/converter/har/converter_test.go index fdf92cce8cc..e285a71b6f8 100644 --- a/converter/har/converter_test.go +++ b/converter/har/converter_test.go @@ -25,10 +25,11 @@ import ( "net/url" "testing" + "github.com/stretchr/testify/assert" + "github.com/loadimpact/k6/js" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/loader" - "github.com/stretchr/testify/assert" ) func TestBuildK6Headers(t *testing.T) { diff --git a/core/engine.go b/core/engine.go index fd4540b8bbc..36cd0609bbc 100644 --- a/core/engine.go +++ b/core/engine.go @@ -22,6 +22,7 @@ package core import ( "context" + "errors" "strings" "sync" "time" @@ -29,35 +30,39 @@ import ( "github.com/sirupsen/logrus" "gopkg.in/guregu/null.v3" - "github.com/loadimpact/k6/core/local" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/metrics" "github.com/loadimpact/k6/stats" ) const ( - TickRate = 1 * time.Millisecond - MetricsRate = 1 * time.Second - CollectRate = 50 * time.Millisecond - ThresholdsRate = 2 * time.Second - ShutdownTimeout = 10 * time.Second - - BackoffAmount = 50 * time.Millisecond - BackoffMax = 10 * time.Second + metricsRate = 1 * time.Second + collectRate = 50 * time.Millisecond + thresholdsRate = 2 * time.Second ) -// The Engine is the beating heart of K6. +// The Engine is the beating heart of k6. 
type Engine struct { - runLock sync.Mutex + // TODO: Make most of the stuff here private! And think how to refactor the + // engine to be less stateful... it's currently one big mess of moving + // pieces, and you implicitly first have to call Init() and then Run() - + // maybe we should refactor it so we have a `Session` dauther-object that + // Init() returns? The only problem with doing this is the REST API - it + // expects to be able to get information from the Engine and is initialized + // before the Init() call... + + ExecutionScheduler lib.ExecutionScheduler + executionState *lib.ExecutionState - Executor lib.Executor Options lib.Options Collectors []lib.Collector NoThresholds bool NoSummary bool SummaryExport bool - logger *logrus.Logger + logger *logrus.Entry + stopOnce sync.Once + stopChan chan struct{} Metrics map[string]*stats.Metric MetricsLock sync.Mutex @@ -72,29 +77,22 @@ type Engine struct { thresholdsTainted bool } -func NewEngine(ex lib.Executor, o lib.Options) (*Engine, error) { +// NewEngine instantiates a new Engine, without doing any heavy initialization. 
+func NewEngine(ex lib.ExecutionScheduler, o lib.Options, logger *logrus.Logger) (*Engine, error) { if ex == nil { - ex = local.New(nil) + return nil, errors.New("missing ExecutionScheduler instance") } e := &Engine{ - Executor: ex, + ExecutionScheduler: ex, + executionState: ex.GetState(), + Options: o, Metrics: make(map[string]*stats.Metric), Samples: make(chan stats.SampleContainer, o.MetricSamplesBufferSize.Int64), + stopChan: make(chan struct{}), + logger: logger.WithField("component", "engine"), } - e.SetLogger(logrus.StandardLogger()) - - if err := ex.SetVUsMax(o.VUsMax.Int64); err != nil { - return nil, err - } - if err := ex.SetVUs(o.VUs.Int64); err != nil { - return nil, err - } - ex.SetPaused(o.Paused.Bool) - ex.SetStages(o.Stages) - ex.SetEndTime(o.Duration) - ex.SetEndIterations(o.Iterations) e.thresholds = o.Thresholds e.submetrics = make(map[string][]*stats.Submetric) @@ -110,157 +108,223 @@ func NewEngine(ex lib.Executor, o lib.Options) (*Engine, error) { return e, nil } -func (e *Engine) setRunStatus(status lib.RunStatus) { - for _, c := range e.Collectors { - c.SetRunStatus(status) +// Init is used to initialize the execution scheduler and all metrics processing +// in the engine. The first is a costly operation, since it initializes all of +// the planned VUs and could potentially take a long time. It either returns an +// error immediately, or it returns test run() and wait() functions. +// +// Things to note: +// - The first lambda, Run(), synchronously executes the actual load test. +// - It can be prematurely aborted by cancelling the runCtx - this won't stop +// the metrics collection by the Engine. +// - Stopping the metrics collection can be done at any time after Run() has +// returned by cancelling the globalCtx +// - The second returned lambda can be used to wait for that process to finish. 
+func (e *Engine) Init(globalCtx, runCtx context.Context) (run func() error, wait func(), err error) { + e.logger.Debug("Initialization starting...") + // TODO: if we ever need metrics processing in the init context, we can move + // this below the other components... or even start them concurrently? + if err := e.ExecutionScheduler.Init(runCtx, e.Samples); err != nil { + return nil, nil, err } -} -func (e *Engine) Run(ctx context.Context) error { - e.runLock.Lock() - defer e.runLock.Unlock() + // TODO: move all of this in a separate struct? see main TODO above - e.logger.Debug("Engine: Starting with parameters...") - for i, st := range e.Executor.GetStages() { - fields := make(logrus.Fields) - if st.Target.Valid { - fields["tgt"] = st.Target.Int64 - } - if st.Duration.Valid { - fields["d"] = st.Duration.Duration + runSubCtx, runSubCancel := context.WithCancel(runCtx) + + resultCh := make(chan error) + processMetricsAfterRun := make(chan struct{}) + runFn := func() error { + e.logger.Debug("Execution scheduler starting...") + err := e.ExecutionScheduler.Run(globalCtx, runSubCtx, e.Samples) + e.logger.WithError(err).Debug("Execution scheduler terminated") + + select { + case <-runSubCtx.Done(): + // do nothing, the test run was aborted somehow + default: + resultCh <- err // we finished normally, so send the result } - e.logger.WithFields(fields).Debugf(" - stage #%d", i) - } - fields := make(logrus.Fields) - if endTime := e.Executor.GetEndTime(); endTime.Valid { - fields["time"] = endTime.Duration - } - if endIter := e.Executor.GetEndIterations(); endIter.Valid { - fields["iter"] = endIter.Int64 + // Make the background jobs process the currently buffered metrics and + // run the thresholds, then wait for that to be done. 
+ processMetricsAfterRun <- struct{}{} + <-processMetricsAfterRun + + return err } - e.logger.WithFields(fields).Debug(" - end conditions (if any)") + waitFn := e.startBackgroundProcesses(globalCtx, runCtx, resultCh, runSubCancel, processMetricsAfterRun) + return runFn, waitFn, nil +} - collectorwg := sync.WaitGroup{} - collectorctx, collectorcancel := context.WithCancel(context.Background()) +// This starts a bunch of goroutines to process metrics, thresholds, and set the +// test run status when it ends. It returns a function that can be used after +// the provided context is called, to wait for the complete winding down of all +// started goroutines. +func (e *Engine) startBackgroundProcesses( //nolint:funlen + globalCtx, runCtx context.Context, runResult <-chan error, runSubCancel func(), processMetricsAfterRun chan struct{}, +) (wait func()) { + processes := new(sync.WaitGroup) + // Spin up all configured collectors for _, collector := range e.Collectors { - collectorwg.Add(1) + processes.Add(1) go func(collector lib.Collector) { - collector.Run(collectorctx) - collectorwg.Done() + collector.Run(globalCtx) + processes.Done() }(collector) } - subctx, subcancel := context.WithCancel(context.Background()) - subwg := sync.WaitGroup{} + // Siphon and handle all produced metric samples + processes.Add(1) + go func() { + defer processes.Done() + e.processMetrics(globalCtx, processMetricsAfterRun) + }() + + // Run VU metrics emission, only while the test is running. + // TODO: move? this seems like something the ExecutionScheduler should emit... + processes.Add(1) + go func() { + defer processes.Done() + e.logger.Debug("Starting emission of VU metrics...") + e.runMetricsEmission(runCtx) + e.logger.Debug("Metrics emission terminated") + }() - // Run metrics emission. 
- subwg.Add(1) + // Update the test run status when the test finishes + processes.Add(1) + thresholdAbortChan := make(chan struct{}) go func() { - e.runMetricsEmission(subctx) - e.logger.Debug("Engine: Emission terminated") - subwg.Done() + defer processes.Done() + select { + case err := <-runResult: + if err != nil { + e.logger.WithError(err).Debug("run: execution scheduler returned an error") + e.setRunStatus(lib.RunStatusAbortedSystem) + } else { + e.logger.Debug("run: execution scheduler terminated") + e.setRunStatus(lib.RunStatusFinished) + } + case <-runCtx.Done(): + e.logger.Debug("run: context expired; exiting...") + e.setRunStatus(lib.RunStatusAbortedUser) + case <-e.stopChan: + runSubCancel() + e.logger.Debug("run: stopped by user; exiting...") + e.setRunStatus(lib.RunStatusAbortedUser) + case <-thresholdAbortChan: + e.logger.Debug("run: stopped by thresholds; exiting...") + runSubCancel() + e.setRunStatus(lib.RunStatusAbortedThreshold) + } }() - // Run thresholds. + // Run thresholds, if not disabled. if !e.NoThresholds { - subwg.Add(1) + processes.Add(1) go func() { - e.runThresholds(subctx, subcancel) - e.logger.Debug("Engine: Thresholds terminated") - subwg.Done() + defer processes.Done() + defer e.logger.Debug("Engine: Thresholds terminated") + ticker := time.NewTicker(thresholdsRate) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if e.processThresholds() { + close(thresholdAbortChan) + return + } + case <-runCtx.Done(): + return + } + } }() } - // Run the executor. - errC := make(chan error) - subwg.Add(1) - go func() { - errC <- e.Executor.Run(subctx, e.Samples) - e.logger.Debug("Engine: Executor terminated") - subwg.Done() - }() + return processes.Wait +} +func (e *Engine) processMetrics(globalCtx context.Context, processMetricsAfterRun chan struct{}) { sampleContainers := []stats.SampleContainer{} - defer func() { - // Shut down subsystems. - subcancel() - // Process samples until the subsystems have shut down. 
- // Filter out samples produced past the end of a test. - go func() { - if errC != nil { - <-errC - errC = nil - } - subwg.Wait() - close(e.Samples) - }() + defer func() { + // Process any remaining metrics in the pipeline, by this point Run() + // has already finished and nothing else should be producing metrics. + e.logger.Debug("Metrics processing winding down...") + close(e.Samples) for sc := range e.Samples { sampleContainers = append(sampleContainers, sc) } - e.processSamples(sampleContainers) - // Emit final metrics. - e.emitMetrics() - - // Process final thresholds. if !e.NoThresholds { - e.processThresholds(nil) + e.processThresholds() // Process the thresholds one final time } - - // Finally, shut down collector. - collectorcancel() - collectorwg.Wait() }() - ticker := time.NewTicker(CollectRate) + ticker := time.NewTicker(collectRate) + defer ticker.Stop() + + e.logger.Debug("Metrics processing started...") + processSamples := func() { + if len(sampleContainers) > 0 { + e.processSamples(sampleContainers) + // Make the new container with the same size as the previous + // one, assuming that we produce roughly the same amount of + // metrics data between ticks... 
+ sampleContainers = make([]stats.SampleContainer, 0, cap(sampleContainers)) + } + } for { select { case <-ticker.C: - if len(sampleContainers) > 0 { - e.processSamples(sampleContainers) - sampleContainers = []stats.SampleContainer{} - } + processSamples() + case <-processMetricsAfterRun: + e.logger.Debug("Processing metrics and thresholds after the test run has ended...") + processSamples() + e.processThresholds() + processMetricsAfterRun <- struct{}{} + case sc := <-e.Samples: sampleContainers = append(sampleContainers, sc) - case err := <-errC: - errC = nil - if err != nil { - e.logger.WithError(err).Debug("run: executor returned an error") - e.setRunStatus(lib.RunStatusAbortedSystem) - return err - } - e.logger.Debug("run: executor terminated") - return nil - case <-ctx.Done(): - e.logger.Debug("run: context expired; exiting...") - e.setRunStatus(lib.RunStatusAbortedUser) - return nil + case <-globalCtx.Done(): + return } } } +func (e *Engine) setRunStatus(status lib.RunStatus) { + for _, c := range e.Collectors { + c.SetRunStatus(status) + } +} + func (e *Engine) IsTainted() bool { return e.thresholdsTainted } -// SetLogger sets Engine's loggger. -func (e *Engine) SetLogger(l *logrus.Logger) { - e.logger = l - e.Executor.SetLogger(l) +// Stop closes a signal channel, forcing a running Engine to return +func (e *Engine) Stop() { + e.stopOnce.Do(func() { + close(e.stopChan) + }) } -// GetLogger returns Engine's current logger. 
-func (e *Engine) GetLogger() *logrus.Logger { - return e.logger +// IsStopped returns a bool indicating whether the Engine has been stopped +func (e *Engine) IsStopped() bool { + select { + case <-e.stopChan: + return true + default: + return false + } } func (e *Engine) runMetricsEmission(ctx context.Context) { - ticker := time.NewTicker(MetricsRate) + ticker := time.NewTicker(metricsRate) for { select { case <-ticker.C: @@ -274,17 +338,19 @@ func (e *Engine) runMetricsEmission(ctx context.Context) { func (e *Engine) emitMetrics() { t := time.Now() + executionState := e.ExecutionScheduler.GetState() + // TODO: optimize and move this, it shouldn't call processSamples() directly e.processSamples([]stats.SampleContainer{stats.ConnectedSamples{ Samples: []stats.Sample{ { Time: t, Metric: metrics.VUs, - Value: float64(e.Executor.GetVUs()), + Value: float64(executionState.GetCurrentlyActiveVUsCount()), Tags: e.Options.RunTags, }, { Time: t, Metric: metrics.VUsMax, - Value: float64(e.Executor.GetVUsMax()), + Value: float64(executionState.GetInitializedVUsCount()), Tags: e.Options.RunTags, }, }, @@ -293,24 +359,11 @@ func (e *Engine) emitMetrics() { }}) } -func (e *Engine) runThresholds(ctx context.Context, abort func()) { - ticker := time.NewTicker(ThresholdsRate) - for { - select { - case <-ticker.C: - e.processThresholds(abort) - case <-ctx.Done(): - return - } - } -} - -func (e *Engine) processThresholds(abort func()) { +func (e *Engine) processThresholds() (shouldAbort bool) { e.MetricsLock.Lock() defer e.MetricsLock.Unlock() - t := e.Executor.GetTime() - abortOnFail := false + t := e.executionState.GetCurrentTestRunDuration() e.thresholdsTainted = false for _, m := range e.Metrics { @@ -329,22 +382,18 @@ func (e *Engine) processThresholds(abort func()) { e.logger.WithField("m", m.Name).Debug("Thresholds failed") m.Tainted = null.BoolFrom(true) e.thresholdsTainted = true - if !abortOnFail && m.Thresholds.Abort { - abortOnFail = true + if m.Thresholds.Abort { + 
shouldAbort = true } } } - if abortOnFail && abort != nil { - //TODO: When sending this status we get a 422 Unprocessable Entity - e.setRunStatus(lib.RunStatusAbortedThreshold) - abort() - } + return shouldAbort } -func (e *Engine) processSamplesForMetrics(sampleCointainers []stats.SampleContainer) { - for _, sampleCointainer := range sampleCointainers { - samples := sampleCointainer.GetSamples() +func (e *Engine) processSamplesForMetrics(sampleContainers []stats.SampleContainer) { + for _, sampleContainer := range sampleContainers { + samples := sampleContainer.GetSamples() if len(samples) == 0 { continue diff --git a/core/engine_test.go b/core/engine_test.go index 852600e109c..6c6d2c8f9b0 100644 --- a/core/engine_test.go +++ b/core/engine_test.go @@ -29,16 +29,18 @@ import ( "time" "github.com/sirupsen/logrus" - logtest "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" "github.com/loadimpact/k6/core/local" "github.com/loadimpact/k6/js" "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/executor" "github.com/loadimpact/k6/lib/metrics" + "github.com/loadimpact/k6/lib/testutils" "github.com/loadimpact/k6/lib/testutils/httpmultibin" + "github.com/loadimpact/k6/lib/testutils/minirunner" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/loader" "github.com/loadimpact/k6/stats" @@ -47,241 +49,113 @@ import ( const isWindows = runtime.GOOS == "windows" -// Apply a null logger to the engine and return the hook. -func applyNullLogger(e *Engine) *logtest.Hook { - logger, hook := logtest.NewNullLogger() - e.SetLogger(logger) - return hook -} - -// Wrapper around newEngine that applies a null logger. 
-func newTestEngine(ex lib.Executor, opts lib.Options) (*Engine, error) { - if !opts.MetricSamplesBufferSize.Valid { - opts.MetricSamplesBufferSize = null.IntFrom(200) +// Wrapper around NewEngine that applies a logger and manages the options. +func newTestEngine( //nolint:golint + t *testing.T, runCtx context.Context, runner lib.Runner, collectors []lib.Collector, opts lib.Options, +) (engine *Engine, run func() error, wait func()) { + if runner == nil { + runner = &minirunner.MiniRunner{} } - e, err := NewEngine(ex, opts) - if err != nil { - return e, err + globalCtx, globalCancel := context.WithCancel(context.Background()) + var runCancel func() + if runCtx == nil { + runCtx, runCancel = context.WithCancel(globalCtx) } - applyNullLogger(e) - return e, nil -} -func LF(fn func(ctx context.Context, out chan<- stats.SampleContainer) error) lib.Executor { - return local.New(&lib.MiniRunner{Fn: fn}) -} + newOpts, err := executor.DeriveScenariosFromShortcuts(lib.Options{ + MetricSamplesBufferSize: null.NewInt(200, false), + }.Apply(runner.GetOptions()).Apply(opts)) + require.NoError(t, err) + require.Empty(t, newOpts.Validate()) -func TestNewEngine(t *testing.T) { - _, err := newTestEngine(nil, lib.Options{}) - assert.NoError(t, err) -} + require.NoError(t, runner.SetOptions(newOpts)) -func TestNewEngineOptions(t *testing.T) { - t.Run("Duration", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{ - Duration: types.NullDurationFrom(10 * time.Second), - }) - assert.NoError(t, err) - assert.Nil(t, e.Executor.GetStages()) - assert.Equal(t, types.NullDurationFrom(10*time.Second), e.Executor.GetEndTime()) - - t.Run("Infinite", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{Duration: types.NullDuration{}}) - assert.NoError(t, err) - assert.Nil(t, e.Executor.GetStages()) - assert.Equal(t, types.NullDuration{}, e.Executor.GetEndTime()) - }) - }) - t.Run("Stages", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{ - Stages: 
[]lib.Stage{ - {Duration: types.NullDurationFrom(10 * time.Second), Target: null.IntFrom(10)}, - }, - }) - assert.NoError(t, err) - if assert.Len(t, e.Executor.GetStages(), 1) { - assert.Equal(t, e.Executor.GetStages()[0], lib.Stage{Duration: types.NullDurationFrom(10 * time.Second), Target: null.IntFrom(10)}) - } - }) - t.Run("Stages/Duration", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{ - Duration: types.NullDurationFrom(60 * time.Second), - Stages: []lib.Stage{ - {Duration: types.NullDurationFrom(10 * time.Second), Target: null.IntFrom(10)}, - }, - }) - assert.NoError(t, err) - if assert.Len(t, e.Executor.GetStages(), 1) { - assert.Equal(t, e.Executor.GetStages()[0], lib.Stage{Duration: types.NullDurationFrom(10 * time.Second), Target: null.IntFrom(10)}) + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + + execScheduler, err := local.NewExecutionScheduler(runner, logger) + require.NoError(t, err) + + engine, err = NewEngine(execScheduler, opts, logger) + require.NoError(t, err) + + engine.Collectors = collectors + + run, waitFn, err := engine.Init(globalCtx, runCtx) + require.NoError(t, err) + + return engine, run, func() { + if runCancel != nil { + runCancel() } - assert.Equal(t, types.NullDurationFrom(60*time.Second), e.Executor.GetEndTime()) - }) - t.Run("Iterations", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{Iterations: null.IntFrom(100)}) - assert.NoError(t, err) - assert.Equal(t, null.IntFrom(100), e.Executor.GetEndIterations()) - }) - t.Run("VUsMax", func(t *testing.T) { - t.Run("not set", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{}) - assert.NoError(t, err) - assert.Equal(t, int64(0), e.Executor.GetVUsMax()) - assert.Equal(t, int64(0), e.Executor.GetVUs()) - }) - t.Run("set", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{ - VUsMax: null.IntFrom(10), - }) - assert.NoError(t, err) - assert.Equal(t, int64(10), e.Executor.GetVUsMax()) - assert.Equal(t, 
int64(0), e.Executor.GetVUs()) - }) - }) - t.Run("VUs", func(t *testing.T) { - t.Run("no max", func(t *testing.T) { - _, err := newTestEngine(nil, lib.Options{ - VUs: null.IntFrom(10), - }) - assert.EqualError(t, err, "can't raise vu count (to 10) above vu cap (0)") - }) - t.Run("negative max", func(t *testing.T) { - _, err := newTestEngine(nil, lib.Options{ - VUsMax: null.IntFrom(-1), - }) - assert.EqualError(t, err, "vu cap can't be negative") - }) - t.Run("max too low", func(t *testing.T) { - _, err := newTestEngine(nil, lib.Options{ - VUsMax: null.IntFrom(1), - VUs: null.IntFrom(10), - }) - assert.EqualError(t, err, "can't raise vu count (to 10) above vu cap (1)") - }) - t.Run("max higher", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{ - VUsMax: null.IntFrom(10), - VUs: null.IntFrom(1), - }) - assert.NoError(t, err) - assert.Equal(t, int64(10), e.Executor.GetVUsMax()) - assert.Equal(t, int64(1), e.Executor.GetVUs()) - }) - t.Run("max just right", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{ - VUsMax: null.IntFrom(10), - VUs: null.IntFrom(10), - }) - assert.NoError(t, err) - assert.Equal(t, int64(10), e.Executor.GetVUsMax()) - assert.Equal(t, int64(10), e.Executor.GetVUs()) - }) - }) - t.Run("Paused", func(t *testing.T) { - t.Run("not set", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{}) - assert.NoError(t, err) - assert.False(t, e.Executor.IsPaused()) - }) - t.Run("false", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{ - Paused: null.BoolFrom(false), - }) - assert.NoError(t, err) - assert.False(t, e.Executor.IsPaused()) - }) - t.Run("true", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{ - Paused: null.BoolFrom(true), - }) - assert.NoError(t, err) - assert.True(t, e.Executor.IsPaused()) - }) - }) - t.Run("thresholds", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{ - Thresholds: map[string]stats.Thresholds{ - "my_metric": {}, - }, - }) - 
assert.NoError(t, err) - assert.Contains(t, e.thresholds, "my_metric") + globalCancel() + waitFn() + } +} - t.Run("submetrics", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{ - Thresholds: map[string]stats.Thresholds{ - "my_metric{tag:value}": {}, - }, - }) - assert.NoError(t, err) - assert.Contains(t, e.thresholds, "my_metric{tag:value}") - assert.Contains(t, e.submetrics, "my_metric") - }) - }) +func TestNewEngine(t *testing.T) { + newTestEngine(t, nil, nil, nil, lib.Options{}) } func TestEngineRun(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) t.Run("exits with context", func(t *testing.T) { - duration := 100 * time.Millisecond - e, err := newTestEngine(nil, lib.Options{}) - assert.NoError(t, err) + done := make(chan struct{}) + runner := &minirunner.MiniRunner{Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + <-ctx.Done() + close(done) + return nil + }} + duration := 100 * time.Millisecond ctx, cancel := context.WithTimeout(context.Background(), duration) defer cancel() + + _, run, wait := newTestEngine(t, ctx, runner, nil, lib.Options{}) + defer wait() + startTime := time.Now() - assert.NoError(t, e.Run(ctx)) + assert.NoError(t, run()) assert.WithinDuration(t, startTime.Add(duration), time.Now(), 100*time.Millisecond) + <-done }) t.Run("exits with executor", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{ + e, run, wait := newTestEngine(t, nil, nil, nil, lib.Options{ VUs: null.IntFrom(10), - VUsMax: null.IntFrom(10), Iterations: null.IntFrom(100), }) - assert.NoError(t, err) - assert.NoError(t, e.Run(context.Background())) - assert.Equal(t, int64(100), e.Executor.GetIterations()) + defer wait() + assert.NoError(t, run()) + assert.Equal(t, uint64(100), e.ExecutionScheduler.GetState().GetFullIterationCount()) }) - // Make sure samples are discarded after context close (using "cutoff" timestamp in local.go) t.Run("collects samples", func(t *testing.T) { testMetric := stats.New("test_metric", 
stats.Trend) signalChan := make(chan interface{}) - var e *Engine - e, err := newTestEngine(LF(func(ctx context.Context, samples chan<- stats.SampleContainer) error { - samples <- stats.Sample{Metric: testMetric, Time: time.Now(), Value: 1} + + runner := &minirunner.MiniRunner{Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + stats.PushIfNotDone(ctx, out, stats.Sample{Metric: testMetric, Time: time.Now(), Value: 1}) close(signalChan) <-ctx.Done() - - // HACK(robin): Add a sleep here to temporarily workaround two problems with this test: - // 1. The sample times are compared against the `cutoff` in core/local/local.go and sometimes the - // second sample (below) gets a `Time` smaller than `cutoff` because the lines below get executed - // before the `<-ctx.Done()` select in local.go:Run() on multi-core systems where - // goroutines can run in parallel. - // 2. Sometimes the `case samples := <-vuOut` gets selected before the `<-ctx.Done()` in - // core/local/local.go:Run() causing all samples from this mocked "RunOnce()" function to be accepted. 
- time.Sleep(time.Millisecond * 10) - samples <- stats.Sample{Metric: testMetric, Time: time.Now(), Value: 2} + stats.PushIfNotDone(ctx, out, stats.Sample{Metric: testMetric, Time: time.Now(), Value: 1}) return nil - }), lib.Options{ - VUs: null.IntFrom(1), - VUsMax: null.IntFrom(1), - Iterations: null.IntFrom(1), - }) - if !assert.NoError(t, err) { - return - } + }} c := &dummy.Collector{} - e.Collectors = []lib.Collector{c} ctx, cancel := context.WithCancel(context.Background()) + _, run, wait := newTestEngine(t, ctx, runner, []lib.Collector{c}, lib.Options{ + VUs: null.IntFrom(1), + Iterations: null.IntFrom(1), + }) + errC := make(chan error) - go func() { errC <- e.Run(ctx) }() + go func() { errC <- run() }() <-signalChan cancel() assert.NoError(t, <-errC) + wait() found := 0 for _, s := range c.Samples { @@ -296,27 +170,49 @@ func TestEngineRun(t *testing.T) { } func TestEngineAtTime(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{}) - assert.NoError(t, err) + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + _, run, wait := newTestEngine(t, ctx, nil, nil, lib.Options{ + VUs: null.IntFrom(2), + Duration: types.NullDurationFrom(20 * time.Second), + }) + defer wait() + assert.NoError(t, run()) +} + +func TestEngineStopped(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() - assert.NoError(t, e.Run(ctx)) + e, run, wait := newTestEngine(t, ctx, nil, nil, lib.Options{ + VUs: null.IntFrom(1), + Duration: types.NullDurationFrom(20 * time.Second), + }) + defer wait() + + assert.NoError(t, run()) + assert.Equal(t, false, e.IsStopped(), "engine should be running") + e.Stop() + assert.Equal(t, true, e.IsStopped(), "engine should be stopped") + e.Stop() // test that a second stop doesn't panic } func TestEngineCollector(t *testing.T) { testMetric := stats.New("test_metric", stats.Trend) - e, err := newTestEngine(LF(func(ctx context.Context, out chan<- 
stats.SampleContainer) error { + runner := &minirunner.MiniRunner{Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { out <- stats.Sample{Metric: testMetric} return nil - }), lib.Options{VUs: null.IntFrom(1), VUsMax: null.IntFrom(1), Iterations: null.IntFrom(1)}) - assert.NoError(t, err) + }} c := &dummy.Collector{} - e.Collectors = []lib.Collector{c} + e, run, wait := newTestEngine(t, nil, runner, []lib.Collector{c}, lib.Options{ + VUs: null.IntFrom(1), + Iterations: null.IntFrom(1), + }) - assert.NoError(t, e.Run(context.Background())) + assert.NoError(t, run()) + wait() cSamples := []stats.Sample{} for _, sample := range c.Samples { @@ -339,8 +235,8 @@ func TestEngine_processSamples(t *testing.T) { metric := stats.New("my_metric", stats.Gauge) t.Run("metric", func(t *testing.T) { - e, err := newTestEngine(nil, lib.Options{}) - assert.NoError(t, err) + e, _, wait := newTestEngine(t, nil, nil, nil, lib.Options{}) + defer wait() e.processSamples( []stats.SampleContainer{stats.Sample{Metric: metric, Value: 1.25, Tags: stats.IntoSampleTags(&map[string]string{"a": "1"})}}, @@ -352,12 +248,12 @@ func TestEngine_processSamples(t *testing.T) { ths, err := stats.NewThresholds([]string{`1+1==2`}) assert.NoError(t, err) - e, err := newTestEngine(nil, lib.Options{ + e, _, wait := newTestEngine(t, nil, nil, nil, lib.Options{ Thresholds: map[string]stats.Thresholds{ "my_metric{a:1}": ths, }, }) - assert.NoError(t, err) + defer wait() sms := e.submetrics["my_metric"] assert.Len(t, sms, 1) @@ -373,62 +269,54 @@ func TestEngine_processSamples(t *testing.T) { }) } -func TestEngine_runThresholds(t *testing.T) { +func TestEngineThresholdsWillAbort(t *testing.T) { metric := stats.New("my_metric", stats.Gauge) - thresholds := make(map[string]stats.Thresholds, 1) ths, err := stats.NewThresholds([]string{"1+1==3"}) assert.NoError(t, err) + ths.Thresholds[0].AbortOnFail = true - t.Run("aborted", func(t *testing.T) { - ths.Thresholds[0].AbortOnFail = true - 
thresholds[metric.Name] = ths - e, err := newTestEngine(nil, lib.Options{Thresholds: thresholds}) - assert.NoError(t, err) - - e.processSamples( - []stats.SampleContainer{stats.Sample{Metric: metric, Value: 1.25, Tags: stats.IntoSampleTags(&map[string]string{"a": "1"})}}, - ) + thresholds := map[string]stats.Thresholds{metric.Name: ths} - ctx, cancel := context.WithCancel(context.Background()) - aborted := false + e, _, wait := newTestEngine(t, nil, nil, nil, lib.Options{Thresholds: thresholds}) + defer wait() - cancelFunc := func() { - cancel() - aborted = true - } + e.processSamples( + []stats.SampleContainer{stats.Sample{Metric: metric, Value: 1.25, Tags: stats.IntoSampleTags(&map[string]string{"a": "1"})}}, + ) + assert.True(t, e.processThresholds()) +} - e.runThresholds(ctx, cancelFunc) +func TestEngineAbortedByThresholds(t *testing.T) { + metric := stats.New("my_metric", stats.Gauge) - assert.True(t, aborted) - }) + ths, err := stats.NewThresholds([]string{"1+1==3"}) + assert.NoError(t, err) + ths.Thresholds[0].AbortOnFail = true - t.Run("canceled", func(t *testing.T) { - ths.Abort = false - thresholds[metric.Name] = ths - e, err := newTestEngine(nil, lib.Options{Thresholds: thresholds}) - assert.NoError(t, err) + thresholds := map[string]stats.Thresholds{metric.Name: ths} - e.processSamples( - []stats.SampleContainer{stats.Sample{Metric: metric, Value: 1.25, Tags: stats.IntoSampleTags(&map[string]string{"a": "1"})}}, - ) + done := make(chan struct{}) + runner := &minirunner.MiniRunner{Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + out <- stats.Sample{Metric: metric, Value: 1.25, Tags: stats.IntoSampleTags(&map[string]string{"a": "1"})} + <-ctx.Done() + close(done) + return nil + }} - ctx, cancel := context.WithCancel(context.Background()) - cancel() + _, run, wait := newTestEngine(t, nil, runner, nil, lib.Options{Thresholds: thresholds}) + defer wait() - done := make(chan struct{}) - go func() { - defer close(done) - 
e.runThresholds(ctx, cancel) - }() + go func() { + assert.NoError(t, run()) + }() - select { - case <-done: - return - case <-time.After(1 * time.Second): - assert.Fail(t, "Test should have completed within a second") - } - }) + select { + case <-done: + return + case <-time.After(10 * time.Second): + assert.Fail(t, "Test should have completed within 10 seconds") + } } func TestEngine_processThresholds(t *testing.T) { @@ -450,6 +338,7 @@ func TestEngine_processThresholds(t *testing.T) { } for name, data := range testdata { + name, data := name, data t.Run(name, func(t *testing.T) { thresholds := make(map[string]stats.Thresholds, len(data.ths)) for m, srcs := range data.ths { @@ -459,25 +348,15 @@ func TestEngine_processThresholds(t *testing.T) { thresholds[m] = ths } - e, err := newTestEngine(nil, lib.Options{Thresholds: thresholds}) - assert.NoError(t, err) + e, _, wait := newTestEngine(t, nil, nil, nil, lib.Options{Thresholds: thresholds}) + defer wait() e.processSamples( []stats.SampleContainer{stats.Sample{Metric: metric, Value: 1.25, Tags: stats.IntoSampleTags(&map[string]string{"a": "1"})}}, ) - abortCalled := false - - abortFunc := func() { - abortCalled = true - } - - e.processThresholds(abortFunc) - + assert.Equal(t, data.abort, e.processThresholds()) assert.Equal(t, data.pass, !e.IsTainted()) - if data.abort { - assert.True(t, abortCalled) - } }) } } @@ -492,6 +371,7 @@ func getMetricSum(collector *dummy.Collector, name string) (result float64) { } return } + func getMetricCount(collector *dummy.Collector, name string) (result uint) { for _, sc := range collector.SampleContainers { for _, s := range sc.GetSamples() { @@ -556,7 +436,7 @@ func TestSentReceivedMetrics(t *testing.T) { type testCase struct{ Iterations, VUs int64 } testCases := []testCase{ - {1, 1}, {1, 2}, {2, 1}, {5, 2}, {25, 2}, {50, 5}, + {1, 1}, {2, 2}, {2, 1}, {5, 2}, {25, 2}, {50, 5}, } runTest := func(t *testing.T, ts testScript, tc testCase, noConnReuse bool) (float64, float64) { @@ 
-567,27 +447,18 @@ func TestSentReceivedMetrics(t *testing.T) { ) require.NoError(t, err) - options := lib.Options{ + collector := &dummy.Collector{} + _, run, wait := newTestEngine(t, nil, r, []lib.Collector{collector}, lib.Options{ Iterations: null.IntFrom(tc.Iterations), VUs: null.IntFrom(tc.VUs), - VUsMax: null.IntFrom(tc.VUs), Hosts: tb.Dialer.Hosts, InsecureSkipTLSVerify: null.BoolFrom(true), NoVUConnectionReuse: null.BoolFrom(noConnReuse), Batch: null.IntFrom(20), - } - - r.SetOptions(options) - engine, err := NewEngine(local.New(r), options) - require.NoError(t, err) - - collector := &dummy.Collector{} - engine.Collectors = []lib.Collector{collector} + }) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() errC := make(chan error) - go func() { errC <- engine.Run(ctx) }() + go func() { errC <- run() }() select { case <-time.After(10 * time.Second): @@ -595,6 +466,7 @@ func TestSentReceivedMetrics(t *testing.T) { case err := <-errC: require.NoError(t, err) } + wait() checkData := func(name string, expected int64) float64 { data := getMetricSum(collector, name) @@ -708,27 +580,18 @@ func TestRunTags(t *testing.T) { ) require.NoError(t, err) - options := lib.Options{ + collector := &dummy.Collector{} + _, run, wait := newTestEngine(t, nil, r, []lib.Collector{collector}, lib.Options{ Iterations: null.IntFrom(3), VUs: null.IntFrom(2), - VUsMax: null.IntFrom(2), Hosts: tb.Dialer.Hosts, RunTags: runTags, SystemTags: &stats.DefaultSystemTagSet, InsecureSkipTLSVerify: null.BoolFrom(true), - } - - r.SetOptions(options) - engine, err := NewEngine(local.New(r), options) - require.NoError(t, err) - - collector := &dummy.Collector{} - engine.Collectors = []lib.Collector{collector} + }) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() errC := make(chan error) - go func() { errC <- engine.Run(ctx) }() + go func() { errC <- run() }() select { case <-time.After(10 * time.Second): @@ -736,6 +599,7 @@ func TestRunTags(t 
*testing.T) { case err := <-errC: require.NoError(t, err) } + wait() systemMetrics := []*stats.Metric{ metrics.VUs, metrics.VUsMax, metrics.Iterations, metrics.IterationDuration, @@ -806,21 +670,17 @@ func TestSetupTeardownThresholds(t *testing.T) { lib.RuntimeOptions{}, ) require.NoError(t, err) - runner.SetOptions(runner.GetOptions().Apply(lib.Options{ + + engine, run, wait := newTestEngine(t, nil, runner, nil, lib.Options{ SystemTags: &stats.DefaultSystemTagSet, SetupTimeout: types.NullDurationFrom(3 * time.Second), TeardownTimeout: types.NullDurationFrom(3 * time.Second), VUs: null.IntFrom(3), - VUsMax: null.IntFrom(3), - })) - - engine, err := NewEngine(local.New(runner), runner.GetOptions()) - require.NoError(t, err) + }) + defer wait() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() errC := make(chan error) - go func() { errC <- engine.Run(ctx) }() + go func() { errC <- run() }() select { case <-time.After(10 * time.Second): @@ -842,15 +702,20 @@ func TestEmittedMetricsWhenScalingDown(t *testing.T) { export let options = { systemTags: ["iter", "vu", "url"], - - // Start with 2 VUs for 4 seconds and then quickly scale down to 1 for the next 4s and then quit - vus: 2, - vusMax: 2, - stages: [ - { duration: "4s", target: 2 }, - { duration: "1s", target: 1 }, - { duration: "3s", target: 1 }, - ], + scenarios: { + we_need_hard_stop_and_ramp_down: { + executor: "ramping-vus", + // Start with 2 VUs for 4 seconds and then quickly scale down to 1 for the next 4s and then quit + startVUs: 2, + stages: [ + { duration: "4s", target: 2 }, + { duration: "0s", target: 1 }, + { duration: "4s", target: 1 }, + ], + gracefulStop: "0s", + gracefulRampDown: "0s", + }, + }, }; export default function () { @@ -869,26 +734,22 @@ func TestEmittedMetricsWhenScalingDown(t *testing.T) { ) require.NoError(t, err) - engine, err := NewEngine(local.New(runner), runner.GetOptions()) - require.NoError(t, err) - collector := &dummy.Collector{} - engine.Collectors = 
[]lib.Collector{collector} + engine, run, wait := newTestEngine(t, nil, runner, []lib.Collector{collector}, lib.Options{}) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() errC := make(chan error) - go func() { errC <- engine.Run(ctx) }() + go func() { errC <- run() }() select { - case <-time.After(10 * time.Second): + case <-time.After(12 * time.Second): t.Fatal("Test timed out") case err := <-errC: require.NoError(t, err) + wait() require.False(t, engine.IsTainted()) } - // The 1.7 sleep in the default function would cause the first VU to comlete 2 full iterations + // The 3.1 sleep in the default function would cause the first VU to complete 2 full iterations // and stat executing its third one, while the second VU will only fully complete 1 iteration // and will be canceled in the middle of its second one. assert.Equal(t, 3.0, getMetricSum(collector, metrics.Iterations.Name)) @@ -897,11 +758,13 @@ func TestEmittedMetricsWhenScalingDown(t *testing.T) { // and one each from the two iterations that would be canceled in the middle of their execution assert.Equal(t, 8.0, getMetricSum(collector, metrics.HTTPReqs.Name)) - // But we expect to only see the data_received for only 7 of those requests. The data for the 8th - // request (the 3rd one in the first VU before the test ends) gets cut off by the engine because - // it's emitted after the test officially ends - dataReceivedExpectedMin := 15000.0 * 7 - dataReceivedExpectedMax := (15000.0 + expectedHeaderMaxLength) * 7 + // And we expect to see the data_received for all 8 of those requests. Previously, the data for + // the 8th request (the 3rd one in the first VU before the test ends) was cut off by the engine + // because it was emitted after the test officially ended. But that was mostly an unintended + // consequence of the fact that those metrics were emitted only after an iteration ended when + // it was interrupted. 
+ dataReceivedExpectedMin := 15000.0 * 8 + dataReceivedExpectedMax := (15000.0 + expectedHeaderMaxLength) * 8 dataReceivedActual := getMetricSum(collector, metrics.DataReceived.Name) if dataReceivedActual < dataReceivedExpectedMin || dataReceivedActual > dataReceivedExpectedMax { t.Errorf( @@ -932,13 +795,13 @@ func TestMetricsEmission(t *testing.T) { // Since emission of Iterations happens before the minIterationDuration // sleep is done, we expect to receive metrics for all executions of // the `default` function, despite of the lower overall duration setting. - {"minIterationDuration", `"150ms"`, "testCounter.add(1);", 16.0, 16.0}, + {"minIterationDuration", `"300ms"`, "testCounter.add(1);", 16.0, 16.0}, // With the manual sleep method and no minIterationDuration, the last // `default` execution will be cutoff by the duration setting, so only // 3 sets of metrics are expected. - {"sleepBeforeCounterAdd", "null", "sleep(0.15); testCounter.add(1); ", 12.0, 12.0}, + {"sleepBeforeCounterAdd", "null", "sleep(0.3); testCounter.add(1); ", 12.0, 12.0}, // The counter should be sent, but the last iteration will be incomplete - {"sleepAfterCounterAdd", "null", "testCounter.add(1); sleep(0.15); ", 16.0, 12.0}, + {"sleepAfterCounterAdd", "null", "testCounter.add(1); sleep(0.3); ", 16.0, 12.0}, } for _, tc := range testCases { @@ -955,9 +818,14 @@ func TestMetricsEmission(t *testing.T) { let testCounter = new Counter("testcounter"); export let options = { - vus: 4, - vusMax: 4, - duration: "500ms", + scenarios: { + we_need_hard_stop: { + executor: "constant-vus", + vus: 4, + duration: "1s", + gracefulStop: "0s", + }, + }, minIterationDuration: %s, }; @@ -970,22 +838,18 @@ func TestMetricsEmission(t *testing.T) { ) require.NoError(t, err) - engine, err := NewEngine(local.New(runner), runner.GetOptions()) - require.NoError(t, err) - collector := &dummy.Collector{} - engine.Collectors = []lib.Collector{collector} + engine, run, wait := newTestEngine(t, nil, runner, 
[]lib.Collector{collector}, runner.GetOptions()) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() errC := make(chan error) - go func() { errC <- engine.Run(ctx) }() + go func() { errC <- run() }() select { case <-time.After(10 * time.Second): t.Fatal("Test timed out") case err := <-errC: require.NoError(t, err) + wait() require.False(t, engine.IsTainted()) } @@ -1007,8 +871,15 @@ func TestMinIterationDurationInSetupTeardownStage(t *testing.T) { export let options = { minIterationDuration: "2s", - duration: "2s", - setupTimeout: "2s", + scenarios: { + we_need_hard_stop: { + executor: "constant-vus", + vus: 2, + duration: "1.9s", + gracefulStop: "0s", + }, + }, + setupTimeout: "3s", }; export default function () { @@ -1018,8 +889,15 @@ func TestMinIterationDurationInSetupTeardownStage(t *testing.T) { export let options = { minIterationDuration: "2s", - duration: "2s", - teardownTimeout: "2s", + scenarios: { + we_need_hard_stop: { + executor: "constant-vus", + vus: 2, + duration: "1.9s", + gracefulStop: "0s", + }, + }, + teardownTimeout: "3s", }; export default function () { @@ -1044,20 +922,52 @@ func TestMinIterationDurationInSetupTeardownStage(t *testing.T) { lib.RuntimeOptions{}, ) require.NoError(t, err) - engine, err := NewEngine(local.New(runner), runner.GetOptions()) - require.NoError(t, err) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + engine, run, wait := newTestEngine(t, nil, runner, nil, runner.GetOptions()) + errC := make(chan error) - go func() { errC <- engine.Run(ctx) }() + go func() { errC <- run() }() select { case <-time.After(10 * time.Second): t.Fatal("Test timed out") case err := <-errC: require.NoError(t, err) + wait() require.False(t, engine.IsTainted()) } }) } } + +func TestEngineRunsTeardownEvenAfterTestRunIsAborted(t *testing.T) { + testMetric := stats.New("teardown_metric", stats.Counter) + + ctx, cancel := context.WithCancel(context.Background()) + + runner := 
&minirunner.MiniRunner{ + Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + cancel() // we cancel the runCtx immediately after the test starts + return nil + }, + TeardownFn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + out <- stats.Sample{Metric: testMetric, Value: 1} + return nil + }, + } + + c := &dummy.Collector{} + _, run, wait := newTestEngine(t, ctx, runner, []lib.Collector{c}, lib.Options{ + VUs: null.IntFrom(1), Iterations: null.IntFrom(1), + }) + + assert.NoError(t, run()) + wait() + + var count float64 + for _, sample := range c.Samples { + if sample.Metric == testMetric { + count += sample.Value + } + } + assert.Equal(t, 1.0, count) +} diff --git a/core/local/local.go b/core/local/local.go index 9b7a47a9f1f..ed1d9d66232 100644 --- a/core/local/local.go +++ b/core/local/local.go @@ -23,518 +23,420 @@ package local import ( "context" "fmt" - "sync" + "runtime" "sync/atomic" "time" - "github.com/pkg/errors" "github.com/sirupsen/logrus" - null "gopkg.in/guregu/null.v3" "github.com/loadimpact/k6/lib" - "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/stats" + "github.com/loadimpact/k6/ui/pb" ) -// TODO: totally rewrite this! -// This is an overcomplicated and probably buggy piece of code that is a major PITA to refactor... -// It does a ton of stuff in a very convoluted way, has a and uses a very incomprehensible mix -// of all possible Go synchronization mechanisms (channels, mutexes, rwmutexes, atomics, -// and waitgroups) and has a bunch of contexts and tickers on top... 
- -var _ lib.Executor = &Executor{} - -type vuHandle struct { - sync.RWMutex - vu lib.VU - ctx context.Context - cancel context.CancelFunc +// ExecutionScheduler is the local implementation of lib.ExecutionScheduler +type ExecutionScheduler struct { + runner lib.Runner + options lib.Options + logger *logrus.Logger + + initProgress *pb.ProgressBar + executorConfigs []lib.ExecutorConfig // sorted by (startTime, ID) + executors []lib.Executor // sorted by (startTime, ID), excludes executors with no work + executionPlan []lib.ExecutionStep + maxDuration time.Duration // cached value derived from the execution plan + maxPossibleVUs uint64 // cached value derived from the execution plan + state *lib.ExecutionState } -func (h *vuHandle) run(logger *logrus.Logger, flow <-chan int64, iterDone chan<- struct{}) { - h.RLock() - ctx := h.ctx - h.RUnlock() - - for { - select { - case _, ok := <-flow: - if !ok { - return - } - case <-ctx.Done(): - return +// Check to see if we implement the lib.ExecutionScheduler interface +var _ lib.ExecutionScheduler = &ExecutionScheduler{} + +// NewExecutionScheduler creates and returns a new local lib.ExecutionScheduler +// instance, without initializing it beyond the bare minimum. Specifically, it +// creates the needed executor instances and a lot of state placeholders, but it +// doesn't initialize the executors and it doesn't initialize or run VUs. 
+func NewExecutionScheduler(runner lib.Runner, logger *logrus.Logger) (*ExecutionScheduler, error) { + options := runner.GetOptions() + et, err := lib.NewExecutionTuple(options.ExecutionSegment, options.ExecutionSegmentSequence) + if err != nil { + return nil, err + } + executionPlan := options.Scenarios.GetFullExecutionRequirements(et) + maxPlannedVUs := lib.GetMaxPlannedVUs(executionPlan) + maxPossibleVUs := lib.GetMaxPossibleVUs(executionPlan) + + executionState := lib.NewExecutionState(options, et, maxPlannedVUs, maxPossibleVUs) + maxDuration, _ := lib.GetEndOffset(executionPlan) // we don't care if the end offset is final + + executorConfigs := options.Scenarios.GetSortedConfigs() + executors := make([]lib.Executor, 0, len(executorConfigs)) + // Only take executors which have work. + for _, sc := range executorConfigs { + if !sc.HasWork(et) { + logger.Warnf( + "Executor '%s' is disabled for segment %s due to lack of work!", + sc.GetName(), options.ExecutionSegment, + ) + continue } - - if h.vu != nil { - err := h.vu.RunOnce(ctx) - select { - case <-ctx.Done(): - // Don't log errors or emit iterations metrics from cancelled iterations - default: - if err != nil { - if s, ok := err.(fmt.Stringer); ok { - logger.Error(s.String()) - } else { - logger.Error(err.Error()) - } - } - iterDone <- struct{}{} - } - } else { - iterDone <- struct{}{} + s, err := sc.NewExecutor(executionState, logger.WithFields(logrus.Fields{ + "scenario": sc.GetName(), + "executor": sc.GetType(), + })) + if err != nil { + return nil, err } + executors = append(executors, s) } -} - -type Executor struct { - Runner lib.Runner - Logger *logrus.Logger - runLock sync.Mutex - wg sync.WaitGroup - - runSetup bool - runTeardown bool - - vus []*vuHandle - vusLock sync.RWMutex - numVUs int64 - numVUsMax int64 - nextVUID int64 - - iters int64 // Completed iterations - partIters int64 // Partial, incomplete iterations - endIters int64 // End test at this many iterations - - time int64 // Current time - 
endTime int64 // End test at this timestamp + if options.Paused.Bool { + if err := executionState.Pause(); err != nil { + return nil, err + } + } - pauseLock sync.RWMutex - pause chan interface{} + return &ExecutionScheduler{ + runner: runner, + logger: logger, + options: options, + + initProgress: pb.New(pb.WithConstLeft("Init")), + executors: executors, + executorConfigs: executorConfigs, + executionPlan: executionPlan, + maxDuration: maxDuration, + maxPossibleVUs: maxPossibleVUs, + state: executionState, + }, nil +} - stages []lib.Stage +// GetRunner returns the wrapped lib.Runner instance. +func (e *ExecutionScheduler) GetRunner() lib.Runner { + return e.runner +} - // Lock for: ctx, flow, out - lock sync.RWMutex +// GetState returns a pointer to the execution state struct for the local +// execution scheduler. It's guaranteed to be initialized and present, though +// see the documentation in lib/execution.go for caveats about its usage. The +// most important one is that none of the methods beyond the pause-related ones +// should be used for synchronization. +func (e *ExecutionScheduler) GetState() *lib.ExecutionState { + return e.state +} - // Current context, nil if a test isn't running right now. - ctx context.Context +// GetExecutors returns the slice of configured executor instances which +// have work, sorted by their (startTime, name) in an ascending order. +func (e *ExecutionScheduler) GetExecutors() []lib.Executor { + return e.executors +} - // Output channel to which VUs send samples. - vuOut chan stats.SampleContainer +// GetExecutorConfigs returns the slice of all executor configs, sorted by +// their (startTime, name) in an ascending order. +func (e *ExecutionScheduler) GetExecutorConfigs() []lib.ExecutorConfig { + return e.executorConfigs +} - // Channel on which VUs sigal that iterations are completed - iterDone chan struct{} +// GetInitProgressBar returns the progress bar associated with the Init +// function. 
After the Init is done, it is "hijacked" to display real-time +// execution statistics as a text bar. +func (e *ExecutionScheduler) GetInitProgressBar() *pb.ProgressBar { + return e.initProgress +} - // Flow control for VUs; iterations are run only after reading from this channel. - flow chan int64 +// GetExecutionPlan is a helper method so users of the local execution scheduler +// don't have to calculate the execution plan again. +func (e *ExecutionScheduler) GetExecutionPlan() []lib.ExecutionStep { + return e.executionPlan } -func New(r lib.Runner) *Executor { - var bufferSize int64 - if r != nil { - bufferSize = r.GetOptions().MetricSamplesBufferSize.Int64 +// initVU is a helper method that's used to both initialize the planned VUs +// in the Init() method, and also passed to executors so they can initialize +// any unplanned VUs themselves. +func (e *ExecutionScheduler) initVU( + samplesOut chan<- stats.SampleContainer, logger *logrus.Entry, +) (lib.InitializedVU, error) { + // Get the VU ID here, so that the VUs are (mostly) ordered by their + // number in the channel buffer + vuID := e.state.GetUniqueVUIdentifier() + vu, err := e.runner.NewVU(int64(vuID), samplesOut) + if err != nil { + return nil, fmt.Errorf("error while initializing VU #%d: '%s'", vuID, err) } - return &Executor{ - Runner: r, - Logger: logrus.StandardLogger(), - runSetup: true, - runTeardown: true, - endIters: -1, - endTime: -1, - vuOut: make(chan stats.SampleContainer, bufferSize), - iterDone: make(chan struct{}), - } + logger.Debugf("Initialized VU #%d", vuID) + return vu, nil } -func (e *Executor) Run(parent context.Context, engineOut chan<- stats.SampleContainer) (reterr error) { - e.runLock.Lock() - defer e.runLock.Unlock() - - if e.Runner != nil && e.runSetup { - if err := e.Runner.Setup(parent, engineOut); err != nil { - return err - } +// getRunStats is a helper function that can be used as the execution +// scheduler's progressbar substitute (i.e. hijack). 
+func (e *ExecutionScheduler) getRunStats() string { + status := "running" + if e.state.IsPaused() { + status = "paused" + } + if e.state.HasStarted() { + dur := e.state.GetCurrentTestRunDuration() + status = fmt.Sprintf("%s (%s)", status, pb.GetFixedLengthDuration(dur, e.maxDuration)) } - ctx, cancel := context.WithCancel(parent) - vuFlow := make(chan int64) - e.lock.Lock() - vuOut := e.vuOut - iterDone := e.iterDone - e.ctx = ctx - e.flow = vuFlow - e.lock.Unlock() - - var cutoff time.Time - defer func() { - if e.Runner != nil && e.runTeardown { - err := e.Runner.Teardown(parent, engineOut) - if reterr == nil { - reterr = err - } else if err != nil { - reterr = fmt.Errorf("teardown error %#v\nPrevious error: %#v", err, reterr) - } - } - - close(vuFlow) - cancel() + vusFmt := pb.GetFixedLengthIntFormat(int64(e.maxPossibleVUs)) + return fmt.Sprintf( + "%s, "+vusFmt+"/"+vusFmt+" VUs, %d complete and %d interrupted iterations", + status, e.state.GetCurrentlyActiveVUsCount(), e.state.GetInitializedVUsCount(), + e.state.GetFullIterationCount(), e.state.GetPartialIterationCount(), + ) +} - e.lock.Lock() - e.ctx = nil - e.vuOut = nil - e.flow = nil - e.lock.Unlock() +func (e *ExecutionScheduler) initVUsConcurrently( + ctx context.Context, samplesOut chan<- stats.SampleContainer, count uint64, + concurrency int, logger *logrus.Entry, +) chan error { + doneInits := make(chan error, count) // poor man's early-return waitgroup + limiter := make(chan struct{}) - wait := make(chan interface{}) + for i := 0; i < concurrency; i++ { go func() { - e.wg.Wait() - close(wait) - }() - - for { - select { - case <-iterDone: - // Spool through all remaining iterations, do not emit stats since the Run() is over - case newSampleContainer := <-vuOut: - if cutoff.IsZero() { - engineOut <- newSampleContainer - } else if csc, ok := newSampleContainer.(stats.ConnectedSampleContainer); ok && csc.GetTime().Before(cutoff) { - engineOut <- newSampleContainer - } else { - for _, s := range 
newSampleContainer.GetSamples() { - if s.Time.Before(cutoff) { - engineOut <- s - } - } + for range limiter { + newVU, err := e.initVU(samplesOut, logger) + if err == nil { + e.state.AddInitializedVU(newVU) } - case <-wait: + doneInits <- err } - select { - case <-wait: - close(vuOut) - return - default: - } - } - }() - - startVUs := atomic.LoadInt64(&e.numVUs) - if err := e.scale(ctx, lib.Max(0, startVUs)); err != nil { - return err + }() } - ticker := time.NewTicker(1 * time.Millisecond) - defer ticker.Stop() - - lastTick := time.Now() - for { - // If the test is paused, sleep until either the pause or the test ends. - // Also shift the last tick to omit time spent paused, but not partial ticks. - e.pauseLock.RLock() - pause := e.pause - e.pauseLock.RUnlock() - if pause != nil { - e.Logger.Debug("Local: Pausing!") - leftovers := time.Since(lastTick) + go func() { + defer close(limiter) + for vuNum := uint64(0); vuNum < count; vuNum++ { select { - case <-pause: - e.Logger.Debug("Local: No longer paused") - lastTick = time.Now().Add(-leftovers) + case limiter <- struct{}{}: case <-ctx.Done(): - e.Logger.Debug("Local: Terminated while in paused state") - return nil + return } } + }() - // Dumb hack: we don't wanna start any more iterations than the max, but we can't - // conditionally select on a channel either...so, we cheat: swap out the flow channel for a - // nil channel (writing to nil always blocks) if we don't wanna write an iteration. - flow := vuFlow - end := atomic.LoadInt64(&e.endIters) - partials := atomic.LoadInt64(&e.partIters) - if end >= 0 && partials >= end { - flow = nil - } + return doneInits +} +// Init concurrently initializes all of the planned VUs and then sequentially +// initializes all of the configured executors. 
+func (e *ExecutionScheduler) Init(ctx context.Context, samplesOut chan<- stats.SampleContainer) error { + logger := e.logger.WithField("phase", "local-execution-scheduler-init") + + vusToInitialize := lib.GetMaxPlannedVUs(e.executionPlan) + logger.WithFields(logrus.Fields{ + "neededVUs": vusToInitialize, + "executorsCount": len(e.executors), + }).Debugf("Start of initialization") + + subctx, cancel := context.WithCancel(ctx) + defer cancel() + + e.state.SetExecutionStatus(lib.ExecutionStatusInitVUs) + doneInits := e.initVUsConcurrently(subctx, samplesOut, vusToInitialize, runtime.GOMAXPROCS(0), logger) + + initializedVUs := new(uint64) + vusFmt := pb.GetFixedLengthIntFormat(int64(vusToInitialize)) + e.initProgress.Modify( + pb.WithProgress(func() (float64, []string) { + doneVUs := atomic.LoadUint64(initializedVUs) + right := fmt.Sprintf(vusFmt+"/%d VUs initialized", doneVUs, vusToInitialize) + return float64(doneVUs) / float64(vusToInitialize), []string{right} + }), + ) + + for vuNum := uint64(0); vuNum < vusToInitialize; vuNum++ { select { - case flow <- partials: - // Start an iteration if there's a VU waiting. See also: the big comment block above. - atomic.AddInt64(&e.partIters, 1) - case t := <-ticker.C: - // Every tick, increment the clock, see if we passed the end point, and process stages. - // If the test ends this way, set a cutoff point; any samples collected past the cutoff - // point are excluded. 
- d := t.Sub(lastTick) - lastTick = t - - end := time.Duration(atomic.LoadInt64(&e.endTime)) - at := time.Duration(atomic.AddInt64(&e.time, int64(d))) - if end >= 0 && at >= end { - e.Logger.WithFields(logrus.Fields{"at": at, "end": end}).Debug("Local: Hit time limit") - cutoff = time.Now() - return nil - } - - stages := e.stages - if len(stages) > 0 { - vus, keepRunning := ProcessStages(startVUs, stages, at) - if !keepRunning { - e.Logger.WithField("at", at).Debug("Local: Ran out of stages") - cutoff = time.Now() - return nil - } - if vus.Valid { - if err := e.SetVUs(vus.Int64); err != nil { - return err - } - } - } - case sampleContainer := <-vuOut: - engineOut <- sampleContainer - case <-iterDone: - // Every iteration ends with a write to iterDone. Check if we've hit the end point. - // If not, make sure to include an Iterations bump in the list! - end := atomic.LoadInt64(&e.endIters) - at := atomic.AddInt64(&e.iters, 1) - if end >= 0 && at >= end { - e.Logger.WithFields(logrus.Fields{"at": at, "end": end}).Debug("Local: Hit iteration limit") - return nil + case err := <-doneInits: + if err != nil { + logger.WithError(err).Debug("VU initialization returned with an error, aborting...") + // the context's cancel() is called in a defer above and will + // abort any in-flight VU initializations + return err } + atomic.AddUint64(initializedVUs, 1) case <-ctx.Done(): - // If the test is cancelled, just set the cutoff point to now and proceed down the same - // logic as if the time limit was hit. 
- e.Logger.Debug("Local: Exiting with context") - cutoff = time.Now() - return nil + return ctx.Err() } } -} -func (e *Executor) scale(ctx context.Context, num int64) error { - e.Logger.WithField("num", num).Debug("Local: Scaling...") - - e.vusLock.Lock() - defer e.vusLock.Unlock() - - e.lock.RLock() - flow := e.flow - iterDone := e.iterDone - e.lock.RUnlock() - - for i, handle := range e.vus { - handle := handle - handle.RLock() - cancel := handle.cancel - handle.RUnlock() - - if i < int(num) { - if cancel == nil { - vuctx, cancel := context.WithCancel(ctx) - handle.Lock() - handle.ctx = vuctx - handle.cancel = cancel - handle.Unlock() - - if handle.vu != nil { - if err := handle.vu.Reconfigure(atomic.AddInt64(&e.nextVUID, 1)); err != nil { - return err - } - } + e.state.SetInitVUFunc(func(ctx context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { + return e.initVU(samplesOut, logger) + }) - e.wg.Add(1) - go func() { - handle.run(e.Logger, flow, iterDone) - e.wg.Done() - }() - } - } else if cancel != nil { - handle.Lock() - handle.cancel() - handle.cancel = nil - handle.Unlock() + e.state.SetExecutionStatus(lib.ExecutionStatusInitExecutors) + logger.Debugf("Finished initializing needed VUs, start initializing executors...") + for _, exec := range e.executors { + executorConfig := exec.GetConfig() + + if err := exec.Init(ctx); err != nil { + return fmt.Errorf("error while initializing executor %s: %s", executorConfig.GetName(), err) } + logger.Debugf("Initialized executor %s", executorConfig.GetName()) } - atomic.StoreInt64(&e.numVUs, num) + e.state.SetExecutionStatus(lib.ExecutionStatusInitDone) + logger.Debugf("Initialization completed") return nil } -func (e *Executor) IsRunning() bool { - e.lock.RLock() - defer e.lock.RUnlock() - return e.ctx != nil -} - -func (e *Executor) GetRunner() lib.Runner { - return e.Runner -} - -// SetLogger sets Executor's logger. 
-func (e *Executor) SetLogger(l *logrus.Logger) { - e.Logger = l -} - -// GetLogger returns current Executor's logger. -func (e *Executor) GetLogger() *logrus.Logger { - return e.Logger -} - -func (e *Executor) GetStages() []lib.Stage { - return e.stages -} - -func (e *Executor) SetStages(s []lib.Stage) { - e.stages = s -} - -func (e *Executor) GetIterations() int64 { - return atomic.LoadInt64(&e.iters) -} - -func (e *Executor) GetEndIterations() null.Int { - v := atomic.LoadInt64(&e.endIters) - if v < 0 { - return null.Int{} - } - return null.IntFrom(v) -} - -func (e *Executor) SetEndIterations(i null.Int) { - if !i.Valid { - i.Int64 = -1 +// runExecutor gets called by the public Run() method once per configured +// executor, each time in a new goroutine. It is responsible for waiting out the +// configured startTime for the specific executor and then running its Run() +// method. +func (e *ExecutionScheduler) runExecutor( + runCtx context.Context, runResults chan<- error, engineOut chan<- stats.SampleContainer, executor lib.Executor, +) { + executorConfig := executor.GetConfig() + executorStartTime := executorConfig.GetStartTime() + executorLogger := e.logger.WithFields(logrus.Fields{ + "executor": executorConfig.GetName(), + "type": executorConfig.GetType(), + "startTime": executorStartTime, + }) + executorProgress := executor.GetProgress() + + // Check if we have to wait before starting the actual executor execution + if executorStartTime > 0 { + startTime := time.Now() + executorProgress.Modify( + pb.WithStatus(pb.Waiting), + pb.WithProgress(func() (float64, []string) { + remWait := (executorStartTime - time.Since(startTime)) + return 0, []string{"waiting", pb.GetFixedLengthDuration(remWait, executorStartTime)} + }), + ) + + executorLogger.Debugf("Waiting for executor start time...") + select { + case <-runCtx.Done(): + runResults <- nil // no error since executor hasn't started yet + return + case <-time.After(executorStartTime): + // continue + } } - 
e.Logger.WithField("i", i.Int64).Debug("Local: Setting end iterations") - atomic.StoreInt64(&e.endIters, i.Int64) -} -func (e *Executor) GetTime() time.Duration { - return time.Duration(atomic.LoadInt64(&e.time)) -} - -func (e *Executor) GetEndTime() types.NullDuration { - v := atomic.LoadInt64(&e.endTime) - if v < 0 { - return types.NullDuration{} + executorProgress.Modify( + pb.WithStatus(pb.Running), + pb.WithConstProgress(0, "started"), + ) + executorLogger.Debugf("Starting executor") + err := executor.Run(runCtx, engineOut) // executor should handle context cancel itself + if err == nil { + executorLogger.Debugf("Executor finished successfully") + } else { + executorLogger.WithField("error", err).Errorf("Executor error") } - return types.NullDurationFrom(time.Duration(v)) + runResults <- err } -func (e *Executor) SetEndTime(t types.NullDuration) { - if !t.Valid { - t.Duration = -1 +// Run the ExecutionScheduler, funneling all generated metric samples through the supplied +// out channel. 
+func (e *ExecutionScheduler) Run(globalCtx, runCtx context.Context, engineOut chan<- stats.SampleContainer) error { + executorsCount := len(e.executors) + logger := e.logger.WithField("phase", "local-execution-scheduler-run") + e.initProgress.Modify(pb.WithConstLeft("Run")) + + if e.state.IsPaused() { + logger.Debug("Execution is paused, waiting for resume or interrupt...") + e.state.SetExecutionStatus(lib.ExecutionStatusPausedBeforeRun) + e.initProgress.Modify(pb.WithConstProgress(1, "paused")) + select { + case <-e.state.ResumeNotify(): + // continue + case <-runCtx.Done(): + return nil + } } - e.Logger.WithField("d", t.Duration).Debug("Local: Setting end time") - atomic.StoreInt64(&e.endTime, int64(t.Duration)) -} -func (e *Executor) IsPaused() bool { - e.pauseLock.RLock() - defer e.pauseLock.RUnlock() - return e.pause != nil -} + e.state.MarkStarted() + defer e.state.MarkEnded() + e.initProgress.Modify(pb.WithConstProgress(1, "running")) -func (e *Executor) SetPaused(paused bool) { - e.Logger.WithField("paused", paused).Debug("Local: Setting paused") - e.pauseLock.Lock() - defer e.pauseLock.Unlock() + logger.WithFields(logrus.Fields{"executorsCount": executorsCount}).Debugf("Start of test run") - if paused && e.pause == nil { - e.pause = make(chan interface{}) - } else if !paused && e.pause != nil { - close(e.pause) - e.pause = nil - } -} + runResults := make(chan error, executorsCount) // nil values are successful runs -func (e *Executor) GetVUs() int64 { - return atomic.LoadInt64(&e.numVUs) -} + runSubCtx, cancel := context.WithCancel(runCtx) + defer cancel() // just in case, and to shut up go vet... 
-func (e *Executor) SetVUs(num int64) error { - if num < 0 { - return errors.New("vu count can't be negative") + // Run setup() before any executors, if it's not disabled + if !e.options.NoSetup.Bool { + logger.Debug("Running setup()") + e.state.SetExecutionStatus(lib.ExecutionStatusSetup) + e.initProgress.Modify(pb.WithConstProgress(1, "setup()")) + if err := e.runner.Setup(runSubCtx, engineOut); err != nil { + logger.WithField("error", err).Debug("setup() aborted by error") + return err + } } + e.initProgress.Modify(pb.WithHijack(e.getRunStats)) - if atomic.LoadInt64(&e.numVUs) == num { - return nil + // Start all executors at their particular startTime in a separate goroutine... + logger.Debug("Start all executors...") + e.state.SetExecutionStatus(lib.ExecutionStatusRunning) + for _, exec := range e.executors { + go e.runExecutor(runSubCtx, runResults, engineOut, exec) } - e.Logger.WithField("vus", num).Debug("Local: Setting VUs") - - if numVUsMax := atomic.LoadInt64(&e.numVUsMax); num > numVUsMax { - return errors.Errorf("can't raise vu count (to %d) above vu cap (%d)", num, numVUsMax) + // Wait for all executors to finish + var firstErr error + for range e.executors { + err := <-runResults + if err != nil && firstErr == nil { + logger.WithError(err).Debug("Executor returned with an error, cancelling test run...") + firstErr = err + cancel() + } } - if ctx := e.ctx; ctx != nil { - if err := e.scale(ctx, num); err != nil { + // Run teardown() after all executors are done, if it's not disabled + if !e.options.NoTeardown.Bool { + logger.Debug("Running teardown()") + e.state.SetExecutionStatus(lib.ExecutionStatusTeardown) + e.initProgress.Modify(pb.WithConstProgress(1, "teardown()")) + + // We run teardown() with the global context, so it isn't interrupted by + // aborts caused by thresholds or even Ctrl+C (unless used twice). 
+ if err := e.runner.Teardown(globalCtx, engineOut); err != nil { + logger.WithField("error", err).Debug("teardown() aborted by error") return err } - } else { - atomic.StoreInt64(&e.numVUs, num) } - return nil -} - -func (e *Executor) GetVUsMax() int64 { - return atomic.LoadInt64(&e.numVUsMax) + return firstErr } -func (e *Executor) SetVUsMax(max int64) error { - e.Logger.WithField("max", max).Debug("Local: Setting max VUs") - if max < 0 { - return errors.New("vu cap can't be negative") - } - - numVUsMax := atomic.LoadInt64(&e.numVUsMax) - - if numVUsMax == max { - return nil - } - - if numVUs := atomic.LoadInt64(&e.numVUs); max < numVUs { - return errors.Errorf("can't lower vu cap (to %d) below vu count (%d)", max, numVUs) - } - - if max < numVUsMax { - e.vus = e.vus[:max] - atomic.StoreInt64(&e.numVUsMax, max) - return nil +// SetPaused pauses a test, if called with true. And if called with false, tries +// to start/resume it. See the lib.ExecutionScheduler interface documentation of +// the methods for the various caveats about its usage. 
+func (e *ExecutionScheduler) SetPaused(pause bool) error { + if !e.state.HasStarted() && e.state.IsPaused() { + if pause { + return fmt.Errorf("execution is already paused") + } + e.logger.Debug("Starting execution") + return e.state.Resume() } - e.lock.RLock() - vuOut := e.vuOut - e.lock.RUnlock() - - e.vusLock.Lock() - defer e.vusLock.Unlock() - - vus := e.vus - for i := numVUsMax; i < max; i++ { - var handle vuHandle - if e.Runner != nil { - vu, err := e.Runner.NewVU(vuOut) - if err != nil { - return err - } - handle.vu = vu + for _, exec := range e.executors { + pausableExecutor, ok := exec.(lib.PausableExecutor) + if !ok { + return fmt.Errorf( + "%s executor '%s' doesn't support pause and resume operations after its start", + exec.GetConfig().GetType(), exec.GetConfig().GetName(), + ) + } + if err := pausableExecutor.SetPaused(pause); err != nil { + return err } - vus = append(vus, &handle) } - e.vus = vus - - atomic.StoreInt64(&e.numVUsMax, max) - - return nil -} - -func (e *Executor) SetRunSetup(r bool) { - e.runSetup = r -} - -func (e *Executor) SetRunTeardown(r bool) { - e.runTeardown = r + if pause { + return e.state.Pause() + } + return e.state.Resume() } diff --git a/core/local/local_test.go b/core/local/local_test.go index 0a1e73570e4..2faa8d6d8bf 100644 --- a/core/local/local_test.go +++ b/core/local/local_test.go @@ -22,52 +22,658 @@ package local import ( "context" + "errors" + "fmt" "net" "net/url" + "reflect" "runtime" "sync/atomic" "testing" "time" - "github.com/loadimpact/k6/lib/netext" - "github.com/loadimpact/k6/loader" + "github.com/sirupsen/logrus" + logtest "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" "github.com/loadimpact/k6/js" "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/executor" "github.com/loadimpact/k6/lib/metrics" + "github.com/loadimpact/k6/lib/netext" + "github.com/loadimpact/k6/lib/netext/httpext" + 
"github.com/loadimpact/k6/lib/testutils" + "github.com/loadimpact/k6/lib/testutils/httpmultibin" + "github.com/loadimpact/k6/lib/testutils/minirunner" "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/loader" "github.com/loadimpact/k6/stats" - "github.com/pkg/errors" - logtest "github.com/sirupsen/logrus/hooks/test" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" ) -func TestExecutorRun(t *testing.T) { - e := New(nil) - assert.NoError(t, e.SetVUsMax(10)) - assert.NoError(t, e.SetVUs(10)) +func newTestExecutionScheduler( + t *testing.T, runner lib.Runner, logger *logrus.Logger, opts lib.Options, +) (ctx context.Context, cancel func(), execScheduler *ExecutionScheduler, samples chan stats.SampleContainer) { + if runner == nil { + runner = &minirunner.MiniRunner{} + } + ctx, cancel = context.WithCancel(context.Background()) + newOpts, err := executor.DeriveScenariosFromShortcuts(lib.Options{ + MetricSamplesBufferSize: null.NewInt(200, false), + }.Apply(runner.GetOptions()).Apply(opts)) + require.NoError(t, err) + require.Empty(t, newOpts.Validate()) + + require.NoError(t, runner.SetOptions(newOpts)) - ctx, cancel := context.WithCancel(context.Background()) - err := make(chan error, 1) - samples := make(chan stats.SampleContainer, 100) - defer close(samples) + if logger == nil { + logger = logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + } + + execScheduler, err = NewExecutionScheduler(runner, logger) + require.NoError(t, err) + + samples = make(chan stats.SampleContainer, newOpts.MetricSamplesBufferSize.Int64) go func() { - for range samples { + for { + select { + case <-samples: + case <-ctx.Done(): + return + } } }() - go func() { err <- e.Run(ctx, samples) }() - cancel() + require.NoError(t, execScheduler.Init(ctx, samples)) + + return ctx, cancel, execScheduler, samples +} + +func TestExecutionSchedulerRun(t *testing.T) { + t.Parallel() + ctx, cancel, execScheduler, samples 
:= newTestExecutionScheduler(t, nil, nil, lib.Options{}) + defer cancel() + + err := make(chan error, 1) + go func() { err <- execScheduler.Run(ctx, ctx, samples) }() assert.NoError(t, <-err) } -func TestExecutorSetupTeardownRun(t *testing.T) { +func TestExecutionSchedulerRunNonDefault(t *testing.T) { + t.Parallel() + + testCases := []struct { + name, script, expErr string + }{ + {"defaultOK", `export default function () {}`, ""}, + {"nonDefaultOK", ` + export let options = { + scenarios: { + per_vu_iters: { + executor: "per-vu-iterations", + vus: 1, + iterations: 1, + exec: "nonDefault", + }, + } + } + export function nonDefault() {}`, ""}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + runner, err := js.New(&loader.SourceData{ + URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script)}, + nil, lib.RuntimeOptions{}) + require.NoError(t, err) + + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + execScheduler, err := NewExecutionScheduler(runner, logger) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + done := make(chan struct{}) + samples := make(chan stats.SampleContainer) + go func() { + err := execScheduler.Init(ctx, samples) + if tc.expErr != "" { + assert.EqualError(t, err, tc.expErr) + } else { + assert.NoError(t, err) + assert.NoError(t, execScheduler.Run(ctx, ctx, samples)) + } + close(done) + }() + for { + select { + case <-samples: + case <-done: + return + } + } + }) + } +} + +func TestExecutionSchedulerRunEnv(t *testing.T) { + t.Parallel() + + scriptTemplate := ` + import { Counter } from "k6/metrics"; + + let errors = new Counter("errors"); + + export let options = { + scenarios: { + executor: { + executor: "%[1]s", + gracefulStop: "0.5s", + %[2]s + } + } + } + + export default function () { + if (__ENV.TESTVAR !== "%[3]s") { + console.error('Wrong env var value. 
Expected: %[3]s, actual: ', __ENV.TESTVAR); + errors.add(1); + } + }` + + executorConfigs := map[string]string{ + "constant-arrival-rate": ` + rate: 1, + timeUnit: "0.5s", + duration: "0.5s", + preAllocatedVUs: 1, + maxVUs: 2,`, + "constant-vus": ` + vus: 1, + duration: "0.5s",`, + "externally-controlled": ` + vus: 1, + duration: "0.5s",`, + "per-vu-iterations": ` + vus: 1, + iterations: 1,`, + "shared-iterations": ` + vus: 1, + iterations: 1,`, + "ramping-arrival-rate": ` + startRate: 1, + timeUnit: "0.5s", + preAllocatedVUs: 1, + maxVUs: 2, + stages: [ { target: 1, duration: "0.5s" } ],`, + "ramping-vus": ` + startVUs: 1, + stages: [ { target: 1, duration: "0.5s" } ],`, + } + + testCases := []struct{ name, script string }{} + + // Generate tests using global env and with env override + for ename, econf := range executorConfigs { + testCases = append(testCases, struct{ name, script string }{ + "global/" + ename, fmt.Sprintf(scriptTemplate, ename, econf, "global")}) + configWithEnvOverride := econf + "env: { TESTVAR: 'overridden' }" + testCases = append(testCases, struct{ name, script string }{ + "override/" + ename, fmt.Sprintf(scriptTemplate, ename, configWithEnvOverride, "overridden")}) + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + runner, err := js.New(&loader.SourceData{ + URL: &url.URL{Path: "/script.js"}, + Data: []byte(tc.script)}, + nil, lib.RuntimeOptions{Env: map[string]string{"TESTVAR": "global"}}) + require.NoError(t, err) + + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + execScheduler, err := NewExecutionScheduler(runner, logger) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + done := make(chan struct{}) + samples := make(chan stats.SampleContainer) + go func() { + assert.NoError(t, execScheduler.Init(ctx, samples)) + assert.NoError(t, execScheduler.Run(ctx, ctx, samples)) + close(done) + }() + for { + select { + case sample 
:= <-samples: + if s, ok := sample.(stats.Sample); ok && s.Metric.Name == "errors" { + assert.FailNow(t, "received error sample from test") + } + case <-done: + return + } + } + }) + } +} + +func TestExecutionSchedulerSystemTags(t *testing.T) { + t.Parallel() + tb := httpmultibin.NewHTTPMultiBin(t) + defer tb.Cleanup() + sr := tb.Replacer.Replace + + script := sr(` + import http from "k6/http"; + + export let options = { + scenarios: { + per_vu_test: { + executor: "per-vu-iterations", + gracefulStop: "0s", + vus: 1, + iterations: 1, + }, + shared_test: { + executor: "shared-iterations", + gracefulStop: "0s", + vus: 1, + iterations: 1, + } + } + } + + export default function () { + http.get("HTTPBIN_IP_URL/"); + }`) + + runner, err := js.New(&loader.SourceData{ + URL: &url.URL{Path: "/script.js"}, + Data: []byte(script)}, + nil, lib.RuntimeOptions{}) + require.NoError(t, err) + + require.NoError(t, runner.SetOptions(runner.GetOptions().Apply(lib.Options{ + SystemTags: &stats.DefaultSystemTagSet, + }))) + + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + execScheduler, err := NewExecutionScheduler(runner, logger) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + samples := make(chan stats.SampleContainer) + done := make(chan struct{}) + go func() { + defer close(done) + require.NoError(t, execScheduler.Init(ctx, samples)) + require.NoError(t, execScheduler.Run(ctx, ctx, samples)) + }() + + expCommonTrailTags := stats.IntoSampleTags(&map[string]string{ + "group": "", + "method": "GET", + "name": sr("HTTPBIN_IP_URL/"), + "url": sr("HTTPBIN_IP_URL/"), + "proto": "HTTP/1.1", + "status": "200", + }) + expTrailPVUTagsRaw := expCommonTrailTags.CloneTags() + expTrailPVUTagsRaw["scenario"] = "per_vu_test" + expTrailPVUTags := stats.IntoSampleTags(&expTrailPVUTagsRaw) + expTrailSITagsRaw := expCommonTrailTags.CloneTags() + expTrailSITagsRaw["scenario"] = "shared_test" + 
expTrailSITags := stats.IntoSampleTags(&expTrailSITagsRaw) + expNetTrailPVUTags := stats.IntoSampleTags(&map[string]string{ + "group": "", + "scenario": "per_vu_test", + }) + expNetTrailSITags := stats.IntoSampleTags(&map[string]string{ + "group": "", + "scenario": "shared_test", + }) + + var gotCorrectTags int + for { + select { + case sample := <-samples: + switch s := sample.(type) { + case *httpext.Trail: + if s.Tags.IsEqual(expTrailPVUTags) || s.Tags.IsEqual(expTrailSITags) { + gotCorrectTags++ + } + case *netext.NetTrail: + if s.Tags.IsEqual(expNetTrailPVUTags) || s.Tags.IsEqual(expNetTrailSITags) { + gotCorrectTags++ + } + } + case <-done: + require.Equal(t, 4, gotCorrectTags, "received wrong amount of samples with expected tags") + return + } + } +} + +func TestExecutionSchedulerRunCustomTags(t *testing.T) { + t.Parallel() + tb := httpmultibin.NewHTTPMultiBin(t) + defer tb.Cleanup() + sr := tb.Replacer.Replace + + scriptTemplate := sr(` + import http from "k6/http"; + + export let options = { + scenarios: { + executor: { + executor: "%s", + gracefulStop: "0.5s", + %s + } + } + } + + export default function () { + http.get("HTTPBIN_IP_URL/"); + }`) + + executorConfigs := map[string]string{ + "constant-arrival-rate": ` + rate: 1, + timeUnit: "0.5s", + duration: "0.5s", + preAllocatedVUs: 1, + maxVUs: 2,`, + "constant-vus": ` + vus: 1, + duration: "0.5s",`, + "externally-controlled": ` + vus: 1, + duration: "0.5s",`, + "per-vu-iterations": ` + vus: 1, + iterations: 1,`, + "shared-iterations": ` + vus: 1, + iterations: 1,`, + "ramping-arrival-rate": ` + startRate: 5, + timeUnit: "0.5s", + preAllocatedVUs: 1, + maxVUs: 2, + stages: [ { target: 10, duration: "1s" } ],`, + "ramping-vus": ` + startVUs: 1, + stages: [ { target: 1, duration: "0.5s" } ],`, + } + + testCases := []struct{ name, script string }{} + + // Generate tests using custom tags + for ename, econf := range executorConfigs { + configWithCustomTag := econf + "tags: { customTag: 'value' }" + 
testCases = append(testCases, struct{ name, script string }{ + ename, fmt.Sprintf(scriptTemplate, ename, configWithCustomTag)}) + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + runner, err := js.New(&loader.SourceData{ + URL: &url.URL{Path: "/script.js"}, + Data: []byte(tc.script)}, + nil, lib.RuntimeOptions{}) + require.NoError(t, err) + + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + execScheduler, err := NewExecutionScheduler(runner, logger) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + done := make(chan struct{}) + samples := make(chan stats.SampleContainer) + go func() { + defer close(done) + require.NoError(t, execScheduler.Init(ctx, samples)) + require.NoError(t, execScheduler.Run(ctx, ctx, samples)) + }() + var gotTrailTag, gotNetTrailTag bool + for { + select { + case sample := <-samples: + if trail, ok := sample.(*httpext.Trail); ok && !gotTrailTag { + tags := trail.Tags.CloneTags() + if v, ok := tags["customTag"]; ok && v == "value" { + gotTrailTag = true + } + } + if netTrail, ok := sample.(*netext.NetTrail); ok && !gotNetTrailTag { + tags := netTrail.Tags.CloneTags() + if v, ok := tags["customTag"]; ok && v == "value" { + gotNetTrailTag = true + } + } + case <-done: + if !gotTrailTag || !gotNetTrailTag { + assert.FailNow(t, "a sample with expected tag wasn't received") + } + return + } + } + }) + } +} + +// Ensure that custom executor settings are unique per executor and +// that there's no "crossover"/"pollution" between executors. +// Also test that custom tags are properly set on checks and groups metrics. 
+func TestExecutionSchedulerRunCustomConfigNoCrossover(t *testing.T) { + t.Parallel() + tb := httpmultibin.NewHTTPMultiBin(t) + defer tb.Cleanup() + + script := tb.Replacer.Replace(` + import http from "k6/http"; + import ws from 'k6/ws'; + import { Counter } from 'k6/metrics'; + import { check, group } from 'k6'; + + let errors = new Counter('errors'); + + export let options = { + // Required for WS tests + hosts: { 'httpbin.local': '127.0.0.1' }, + scenarios: { + scenario1: { + executor: 'per-vu-iterations', + vus: 1, + iterations: 1, + gracefulStop: '0s', + maxDuration: '1s', + exec: 's1func', + env: { TESTVAR1: 'scenario1' }, + tags: { testtag1: 'scenario1' }, + }, + scenario2: { + executor: 'shared-iterations', + vus: 1, + iterations: 1, + gracefulStop: '1s', + startTime: '0.5s', + maxDuration: '2s', + exec: 's2func', + env: { TESTVAR2: 'scenario2' }, + tags: { testtag2: 'scenario2' }, + }, + scenario3: { + executor: 'per-vu-iterations', + vus: 1, + iterations: 1, + gracefulStop: '1s', + exec: 's3funcWS', + env: { TESTVAR3: 'scenario3' }, + tags: { testtag3: 'scenario3' }, + }, + } + } + + function checkVar(name, expected) { + if (__ENV[name] !== expected) { + console.error('Wrong ' + name + " env var value. 
Expected: '" + + expected + "', actual: '" + __ENV[name] + "'"); + errors.add(1); + } + } + + export function s1func() { + checkVar('TESTVAR1', 'scenario1'); + checkVar('TESTVAR2', undefined); + checkVar('TESTVAR3', undefined); + checkVar('TESTGLOBALVAR', 'global'); + + // Intentionally try to pollute the env + __ENV.TESTVAR2 = 'overridden'; + + http.get('HTTPBIN_IP_URL/', { tags: { reqtag: 'scenario1' }}); + } + + export function s2func() { + checkVar('TESTVAR1', undefined); + checkVar('TESTVAR2', 'scenario2'); + checkVar('TESTVAR3', undefined); + checkVar('TESTGLOBALVAR', 'global'); + + http.get('HTTPBIN_IP_URL/', { tags: { reqtag: 'scenario2' }}); + } + + export function s3funcWS() { + checkVar('TESTVAR1', undefined); + checkVar('TESTVAR2', undefined); + checkVar('TESTVAR3', 'scenario3'); + checkVar('TESTGLOBALVAR', 'global'); + + const customTags = { wstag: 'scenario3' }; + group('wsgroup', function() { + const response = ws.connect('WSBIN_URL/ws-echo', { tags: customTags }, + function (socket) { + socket.on('open', function() { + socket.send('hello'); + }); + socket.on('message', function(msg) { + if (msg != 'hello') { + console.error("Expected to receive 'hello' but got '" + msg + "' instead!"); + errors.add(1); + } + socket.close() + }); + socket.on('error', function (e) { + console.log('ws error: ' + e.error()); + errors.add(1); + }); + } + ); + check(response, { 'status is 101': (r) => r && r.status === 101 }, customTags); + }); + } +`) + + runner, err := js.New(&loader.SourceData{ + URL: &url.URL{Path: "/script.js"}, + Data: []byte(script)}, + nil, lib.RuntimeOptions{Env: map[string]string{"TESTGLOBALVAR": "global"}}) + require.NoError(t, err) + + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + execScheduler, err := NewExecutionScheduler(runner, logger) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + samples := make(chan stats.SampleContainer) + go func() { 
+ assert.NoError(t, execScheduler.Init(ctx, samples)) + assert.NoError(t, execScheduler.Run(ctx, ctx, samples)) + close(samples) + }() + + expectedTrailTags := []map[string]string{ + {"testtag1": "scenario1", "reqtag": "scenario1"}, + {"testtag2": "scenario2", "reqtag": "scenario2"}, + } + expectedNetTrailTags := []map[string]string{ + {"testtag1": "scenario1"}, + {"testtag2": "scenario2"}, + } + expectedConnSampleTags := map[string]string{ + "testtag3": "scenario3", "wstag": "scenario3", + } + expectedPlainSampleTags := []map[string]string{ + {"testtag3": "scenario3"}, + {"testtag3": "scenario3", "wstag": "scenario3"}, + } + var gotSampleTags int + for sample := range samples { + switch s := sample.(type) { + case stats.Sample: + if s.Metric.Name == "errors" { + assert.FailNow(t, "received error sample from test") + } + if s.Metric.Name == "checks" || s.Metric.Name == "group_duration" { + tags := s.Tags.CloneTags() + for _, expTags := range expectedPlainSampleTags { + if reflect.DeepEqual(expTags, tags) { + gotSampleTags++ + } + } + } + case *httpext.Trail: + tags := s.Tags.CloneTags() + for _, expTags := range expectedTrailTags { + if reflect.DeepEqual(expTags, tags) { + gotSampleTags++ + } + } + case *netext.NetTrail: + tags := s.Tags.CloneTags() + for _, expTags := range expectedNetTrailTags { + if reflect.DeepEqual(expTags, tags) { + gotSampleTags++ + } + } + case stats.ConnectedSamples: + for _, sm := range s.Samples { + tags := sm.Tags.CloneTags() + if reflect.DeepEqual(expectedConnSampleTags, tags) { + gotSampleTags++ + } + } + } + } + require.Equal(t, 8, gotSampleTags, "received wrong amount of samples with expected tags") +} + +func TestExecutionSchedulerSetupTeardownRun(t *testing.T) { + t.Parallel() t.Run("Normal", func(t *testing.T) { setupC := make(chan struct{}) teardownC := make(chan struct{}) - e := New(&lib.MiniRunner{ + runner := &minirunner.MiniRunner{ SetupFn: func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) { 
close(setupC) return nil, nil @@ -76,214 +682,259 @@ func TestExecutorSetupTeardownRun(t *testing.T) { close(teardownC) return nil }, - }) + } + ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{}) - ctx, cancel := context.WithCancel(context.Background()) err := make(chan error, 1) - go func() { err <- e.Run(ctx, make(chan stats.SampleContainer, 100)) }() - cancel() + go func() { err <- execScheduler.Run(ctx, ctx, samples) }() + defer cancel() <-setupC <-teardownC assert.NoError(t, <-err) }) t.Run("Setup Error", func(t *testing.T) { - e := New(&lib.MiniRunner{ + runner := &minirunner.MiniRunner{ + SetupFn: func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) { + return nil, errors.New("setup error") + }, + } + ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{}) + defer cancel() + assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "setup error") + }) + t.Run("Don't Run Setup", func(t *testing.T) { + runner := &minirunner.MiniRunner{ SetupFn: func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) { return nil, errors.New("setup error") }, TeardownFn: func(ctx context.Context, out chan<- stats.SampleContainer) error { return errors.New("teardown error") }, + } + ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{ + NoSetup: null.BoolFrom(true), + VUs: null.IntFrom(1), + Iterations: null.IntFrom(1), }) - assert.EqualError(t, e.Run(context.Background(), make(chan stats.SampleContainer, 100)), "setup error") - - t.Run("Don't Run Setup", func(t *testing.T) { - e := New(&lib.MiniRunner{ - SetupFn: func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) { - return nil, errors.New("setup error") - }, - TeardownFn: func(ctx context.Context, out chan<- stats.SampleContainer) error { - return errors.New("teardown error") - }, - }) - e.SetRunSetup(false) - 
e.SetEndIterations(null.IntFrom(1)) - assert.NoError(t, e.SetVUsMax(1)) - assert.NoError(t, e.SetVUs(1)) - assert.EqualError(t, e.Run(context.Background(), make(chan stats.SampleContainer, 100)), "teardown error") - }) + defer cancel() + assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "teardown error") }) + t.Run("Teardown Error", func(t *testing.T) { - e := New(&lib.MiniRunner{ + runner := &minirunner.MiniRunner{ SetupFn: func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) { return nil, nil }, TeardownFn: func(ctx context.Context, out chan<- stats.SampleContainer) error { return errors.New("teardown error") }, + } + ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{ + VUs: null.IntFrom(1), + Iterations: null.IntFrom(1), }) - e.SetEndIterations(null.IntFrom(1)) - assert.NoError(t, e.SetVUsMax(1)) - assert.NoError(t, e.SetVUs(1)) - assert.EqualError(t, e.Run(context.Background(), make(chan stats.SampleContainer, 100)), "teardown error") - - t.Run("Don't Run Teardown", func(t *testing.T) { - e := New(&lib.MiniRunner{ - SetupFn: func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) { - return nil, nil - }, - TeardownFn: func(ctx context.Context, out chan<- stats.SampleContainer) error { - return errors.New("teardown error") - }, - }) - e.SetRunTeardown(false) - e.SetEndIterations(null.IntFrom(1)) - assert.NoError(t, e.SetVUsMax(1)) - assert.NoError(t, e.SetVUs(1)) - assert.NoError(t, e.Run(context.Background(), make(chan stats.SampleContainer, 100))) + defer cancel() + + assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "teardown error") + }) + t.Run("Don't Run Teardown", func(t *testing.T) { + runner := &minirunner.MiniRunner{ + SetupFn: func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) { + return nil, nil + }, + TeardownFn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + return errors.New("teardown error") + }, 
+ } + ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{ + NoTeardown: null.BoolFrom(true), + VUs: null.IntFrom(1), + Iterations: null.IntFrom(1), }) + defer cancel() + assert.NoError(t, execScheduler.Run(ctx, ctx, samples)) }) } -func TestExecutorSetLogger(t *testing.T) { - logger, _ := logtest.NewNullLogger() - e := New(nil) - e.SetLogger(logger) - assert.Equal(t, logger, e.GetLogger()) -} - -func TestExecutorStages(t *testing.T) { +func TestExecutionSchedulerStages(t *testing.T) { + t.Parallel() testdata := map[string]struct { Duration time.Duration Stages []lib.Stage }{ "one": { 1 * time.Second, - []lib.Stage{{Duration: types.NullDurationFrom(1 * time.Second)}}, + []lib.Stage{{Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(1)}}, }, "two": { 2 * time.Second, []lib.Stage{ - {Duration: types.NullDurationFrom(1 * time.Second)}, - {Duration: types.NullDurationFrom(1 * time.Second)}, + {Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(1)}, + {Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(2)}, }, }, - "two/targeted": { - 2 * time.Second, + "four": { + 4 * time.Second, []lib.Stage{ {Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(5)}, - {Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(10)}, + {Duration: types.NullDurationFrom(3 * time.Second), Target: null.IntFrom(10)}, }, }, } for name, data := range testdata { + data := data t.Run(name, func(t *testing.T) { - e := New(&lib.MiniRunner{ + t.Parallel() + runner := &minirunner.MiniRunner{ Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { time.Sleep(100 * time.Millisecond) return nil }, - Options: lib.Options{ - MetricSamplesBufferSize: null.IntFrom(500), - }, + } + ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{ + VUs: null.IntFrom(1), + Stages: data.Stages, }) - assert.NoError(t, 
e.SetVUsMax(10)) - e.SetStages(data.Stages) - assert.NoError(t, e.Run(context.Background(), make(chan stats.SampleContainer, 500))) - assert.True(t, e.GetTime() >= data.Duration) + defer cancel() + assert.NoError(t, execScheduler.Run(ctx, ctx, samples)) + assert.True(t, execScheduler.GetState().GetCurrentTestRunDuration() >= data.Duration) }) } } -func TestExecutorEndTime(t *testing.T) { - e := New(&lib.MiniRunner{ +func TestExecutionSchedulerEndTime(t *testing.T) { + t.Parallel() + runner := &minirunner.MiniRunner{ Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { time.Sleep(100 * time.Millisecond) return nil }, - Options: lib.Options{MetricSamplesBufferSize: null.IntFrom(200)}, + } + ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{ + VUs: null.IntFrom(10), + Duration: types.NullDurationFrom(1 * time.Second), }) - assert.NoError(t, e.SetVUsMax(10)) - assert.NoError(t, e.SetVUs(10)) - e.SetEndTime(types.NullDurationFrom(1 * time.Second)) - assert.Equal(t, types.NullDurationFrom(1*time.Second), e.GetEndTime()) + defer cancel() + + endTime, isFinal := lib.GetEndOffset(execScheduler.GetExecutionPlan()) + assert.Equal(t, 31*time.Second, endTime) // because of the default 30s gracefulStop + assert.True(t, isFinal) startTime := time.Now() - assert.NoError(t, e.Run(context.Background(), make(chan stats.SampleContainer, 200))) - assert.True(t, time.Now().After(startTime.Add(1*time.Second)), "test did not take 1s") - - t.Run("Runtime Errors", func(t *testing.T) { - e := New(&lib.MiniRunner{ - Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { - time.Sleep(10 * time.Millisecond) - return errors.New("hi") - }, - Options: lib.Options{MetricSamplesBufferSize: null.IntFrom(200)}, - }) - assert.NoError(t, e.SetVUsMax(10)) - assert.NoError(t, e.SetVUs(10)) - e.SetEndTime(types.NullDurationFrom(100 * time.Millisecond)) - assert.Equal(t, types.NullDurationFrom(100*time.Millisecond), 
e.GetEndTime()) + assert.NoError(t, execScheduler.Run(ctx, ctx, samples)) + runTime := time.Since(startTime) + assert.True(t, runTime > 1*time.Second, "test did not take 1s") + assert.True(t, runTime < 10*time.Second, "took more than 10 seconds") +} - l, hook := logtest.NewNullLogger() - e.SetLogger(l) +func TestExecutionSchedulerRuntimeErrors(t *testing.T) { + t.Parallel() + runner := &minirunner.MiniRunner{ + Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + time.Sleep(10 * time.Millisecond) + return errors.New("hi") + }, + Options: lib.Options{ + VUs: null.IntFrom(10), + Duration: types.NullDurationFrom(1 * time.Second), + }, + } + logger, hook := logtest.NewNullLogger() + ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, logger, lib.Options{}) + defer cancel() - startTime := time.Now() - assert.NoError(t, e.Run(context.Background(), make(chan stats.SampleContainer, 200))) - assert.True(t, time.Now().After(startTime.Add(100*time.Millisecond)), "test did not take 100ms") + endTime, isFinal := lib.GetEndOffset(execScheduler.GetExecutionPlan()) + assert.Equal(t, 31*time.Second, endTime) // because of the default 30s gracefulStop + assert.True(t, isFinal) - assert.NotEmpty(t, hook.Entries) - for _, e := range hook.Entries { - assert.Equal(t, "hi", e.Message) - } - }) + startTime := time.Now() + assert.NoError(t, execScheduler.Run(ctx, ctx, samples)) + runTime := time.Since(startTime) + assert.True(t, runTime > 1*time.Second, "test did not take 1s") + assert.True(t, runTime < 10*time.Second, "took more than 10 seconds") - t.Run("End Errors", func(t *testing.T) { - e := New(&lib.MiniRunner{ - Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { - <-ctx.Done() - return errors.New("hi") - }, - Options: lib.Options{MetricSamplesBufferSize: null.IntFrom(200)}, - }) - assert.NoError(t, e.SetVUsMax(10)) - assert.NoError(t, e.SetVUs(10)) - e.SetEndTime(types.NullDurationFrom(100 * time.Millisecond)) - 
assert.Equal(t, types.NullDurationFrom(100*time.Millisecond), e.GetEndTime()) + assert.NotEmpty(t, hook.Entries) + for _, e := range hook.Entries { + assert.Equal(t, "hi", e.Message) + } +} - l, hook := logtest.NewNullLogger() - e.SetLogger(l) +func TestExecutionSchedulerEndErrors(t *testing.T) { + t.Parallel() - startTime := time.Now() - assert.NoError(t, e.Run(context.Background(), make(chan stats.SampleContainer, 200))) - assert.True(t, time.Now().After(startTime.Add(100*time.Millisecond)), "test did not take 100ms") + exec := executor.NewConstantVUsConfig("we_need_hard_stop") + exec.VUs = null.IntFrom(10) + exec.Duration = types.NullDurationFrom(1 * time.Second) + exec.GracefulStop = types.NullDurationFrom(0 * time.Second) - assert.Empty(t, hook.Entries) - }) + runner := &minirunner.MiniRunner{ + Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + <-ctx.Done() + return errors.New("hi") + }, + Options: lib.Options{ + Scenarios: lib.ScenarioConfigs{exec.GetName(): exec}, + }, + } + logger, hook := logtest.NewNullLogger() + ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, logger, lib.Options{}) + defer cancel() + + endTime, isFinal := lib.GetEndOffset(execScheduler.GetExecutionPlan()) + assert.Equal(t, 1*time.Second, endTime) // because of the 0s gracefulStop + assert.True(t, isFinal) + + startTime := time.Now() + assert.NoError(t, execScheduler.Run(ctx, ctx, samples)) + runTime := time.Since(startTime) + assert.True(t, runTime > 1*time.Second, "test did not take 1s") + assert.True(t, runTime < 10*time.Second, "took more than 10 seconds") + + assert.Empty(t, hook.Entries) } -func TestExecutorEndIterations(t *testing.T) { +func TestExecutionSchedulerEndIterations(t *testing.T) { + t.Parallel() metric := &stats.Metric{Name: "test_metric"} + options, err := executor.DeriveScenariosFromShortcuts(lib.Options{ + VUs: null.IntFrom(1), + Iterations: null.IntFrom(100), + }) + require.NoError(t, err) + require.Empty(t, 
options.Validate()) + var i int64 - e := New(&lib.MiniRunner{Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { - select { - case <-ctx.Done(): - default: - atomic.AddInt64(&i, 1) - } - out <- stats.Sample{Metric: metric, Value: 1.0} - return nil - }}) - assert.NoError(t, e.SetVUsMax(1)) - assert.NoError(t, e.SetVUs(1)) - e.SetEndIterations(null.IntFrom(100)) - assert.Equal(t, null.IntFrom(100), e.GetEndIterations()) - - samples := make(chan stats.SampleContainer, 201) - assert.NoError(t, e.Run(context.Background(), samples)) - assert.Equal(t, int64(100), e.GetIterations()) + runner := &minirunner.MiniRunner{ + Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + select { + case <-ctx.Done(): + default: + atomic.AddInt64(&i, 1) + } + out <- stats.Sample{Metric: metric, Value: 1.0} + return nil + }, + Options: options, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + + execScheduler, err := NewExecutionScheduler(runner, logger) + require.NoError(t, err) + + samples := make(chan stats.SampleContainer, 300) + require.NoError(t, execScheduler.Init(ctx, samples)) + require.NoError(t, execScheduler.Run(ctx, ctx, samples)) + + assert.Equal(t, uint64(100), execScheduler.GetState().GetFullIterationCount()) + assert.Equal(t, uint64(0), execScheduler.GetState().GetPartialIterationCount()) assert.Equal(t, int64(100), i) + require.Equal(t, 100, len(samples)) // TODO: change to 200 https://github.com/loadimpact/k6/issues/1250 for i := 0; i < 100; i++ { mySample, ok := <-samples require.True(t, ok) @@ -291,148 +942,29 @@ func TestExecutorEndIterations(t *testing.T) { } } -func TestExecutorIsRunning(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - e := New(nil) +func TestExecutionSchedulerIsRunning(t *testing.T) { + t.Parallel() + runner := &minirunner.MiniRunner{ + Fn: func(ctx context.Context, out chan<- 
stats.SampleContainer) error { + <-ctx.Done() + return nil + }, + } + ctx, cancel, execScheduler, _ := newTestExecutionScheduler(t, runner, nil, lib.Options{}) + state := execScheduler.GetState() err := make(chan error) - go func() { err <- e.Run(ctx, nil) }() - for !e.IsRunning() { + go func() { err <- execScheduler.Run(ctx, ctx, nil) }() + for !state.HasStarted() { + time.Sleep(10 * time.Microsecond) } cancel() - for e.IsRunning() { + for !state.HasEnded() { + time.Sleep(10 * time.Microsecond) } assert.NoError(t, <-err) } -func TestExecutorSetVUsMax(t *testing.T) { - t.Run("Negative", func(t *testing.T) { - assert.EqualError(t, New(nil).SetVUsMax(-1), "vu cap can't be negative") - }) - - t.Run("Raise", func(t *testing.T) { - e := New(nil) - - assert.NoError(t, e.SetVUsMax(50)) - assert.Equal(t, int64(50), e.GetVUsMax()) - - assert.NoError(t, e.SetVUsMax(100)) - assert.Equal(t, int64(100), e.GetVUsMax()) - - t.Run("Lower", func(t *testing.T) { - assert.NoError(t, e.SetVUsMax(50)) - assert.Equal(t, int64(50), e.GetVUsMax()) - }) - }) - - t.Run("TooLow", func(t *testing.T) { - e := New(nil) - e.ctx = context.Background() - - assert.NoError(t, e.SetVUsMax(100)) - assert.Equal(t, int64(100), e.GetVUsMax()) - - assert.NoError(t, e.SetVUs(100)) - assert.Equal(t, int64(100), e.GetVUs()) - - assert.EqualError(t, e.SetVUsMax(50), "can't lower vu cap (to 50) below vu count (100)") - }) -} - -func TestExecutorSetVUs(t *testing.T) { - t.Run("Negative", func(t *testing.T) { - assert.EqualError(t, New(nil).SetVUs(-1), "vu count can't be negative") - }) - - t.Run("Too High", func(t *testing.T) { - assert.EqualError(t, New(nil).SetVUs(100), "can't raise vu count (to 100) above vu cap (0)") - }) - - t.Run("Raise", func(t *testing.T) { - e := New(&lib.MiniRunner{Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { - return nil - }}) - e.ctx = context.Background() - - assert.NoError(t, e.SetVUsMax(100)) - assert.Equal(t, int64(100), e.GetVUsMax()) - if 
assert.Len(t, e.vus, 100) { - num := 0 - for i, handle := range e.vus { - num++ - if assert.NotNil(t, handle.vu, "vu %d lacks impl", i) { - assert.Equal(t, int64(0), handle.vu.(*lib.MiniRunnerVU).ID) - } - assert.Nil(t, handle.ctx, "vu %d has ctx", i) - assert.Nil(t, handle.cancel, "vu %d has cancel", i) - } - assert.Equal(t, 100, num) - } - - assert.NoError(t, e.SetVUs(50)) - assert.Equal(t, int64(50), e.GetVUs()) - if assert.Len(t, e.vus, 100) { - num := 0 - for i, handle := range e.vus { - if i < 50 { - assert.NotNil(t, handle.cancel, "vu %d lacks cancel", i) - assert.Equal(t, int64(i+1), handle.vu.(*lib.MiniRunnerVU).ID) - num++ - } else { - assert.Nil(t, handle.cancel, "vu %d has cancel", i) - assert.Equal(t, int64(0), handle.vu.(*lib.MiniRunnerVU).ID) - } - } - assert.Equal(t, 50, num) - } - - assert.NoError(t, e.SetVUs(100)) - assert.Equal(t, int64(100), e.GetVUs()) - if assert.Len(t, e.vus, 100) { - num := 0 - for i, handle := range e.vus { - assert.NotNil(t, handle.cancel, "vu %d lacks cancel", i) - assert.Equal(t, int64(i+1), handle.vu.(*lib.MiniRunnerVU).ID) - num++ - } - assert.Equal(t, 100, num) - } - - t.Run("Lower", func(t *testing.T) { - assert.NoError(t, e.SetVUs(50)) - assert.Equal(t, int64(50), e.GetVUs()) - if assert.Len(t, e.vus, 100) { - num := 0 - for i, handle := range e.vus { - if i < 50 { - assert.NotNil(t, handle.cancel, "vu %d lacks cancel", i) - num++ - } else { - assert.Nil(t, handle.cancel, "vu %d has cancel", i) - } - assert.Equal(t, int64(i+1), handle.vu.(*lib.MiniRunnerVU).ID) - } - assert.Equal(t, 50, num) - } - - t.Run("Raise", func(t *testing.T) { - assert.NoError(t, e.SetVUs(100)) - assert.Equal(t, int64(100), e.GetVUs()) - if assert.Len(t, e.vus, 100) { - for i, handle := range e.vus { - assert.NotNil(t, handle.cancel, "vu %d lacks cancel", i) - if i < 50 { - assert.Equal(t, int64(i+1), handle.vu.(*lib.MiniRunnerVU).ID) - } else { - assert.Equal(t, int64(50+i+1), handle.vu.(*lib.MiniRunnerVU).ID) - } - } - } - }) - }) - }) -} 
- func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { if runtime.GOOS == "windows" { t.Skip() @@ -475,31 +1007,33 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { counter.add(6, { place: "defaultAfterSleep" }); }`) - runner, err := js.New( - &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, - nil, - lib.RuntimeOptions{}, - ) + runner, err := js.New(&loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: script}, nil, lib.RuntimeOptions{}) require.NoError(t, err) - options := lib.Options{ + options, err := executor.DeriveScenariosFromShortcuts(runner.GetOptions().Apply(lib.Options{ + Iterations: null.IntFrom(2), + VUs: null.IntFrom(1), SystemTags: &stats.DefaultSystemTagSet, SetupTimeout: types.NullDurationFrom(4 * time.Second), TeardownTimeout: types.NullDurationFrom(4 * time.Second), - } - runner.SetOptions(options) + })) + require.NoError(t, err) + require.NoError(t, runner.SetOptions(options)) - executor := New(runner) - executor.SetEndIterations(null.IntFrom(2)) - require.NoError(t, executor.SetVUsMax(1)) - require.NoError(t, executor.SetVUs(1)) + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + + execScheduler, err := NewExecutionScheduler(runner, logger) + require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) defer cancel() + done := make(chan struct{}) sampleContainers := make(chan stats.SampleContainer) go func() { - assert.NoError(t, executor.Run(ctx, sampleContainers)) + require.NoError(t, execScheduler.Init(ctx, sampleContainers)) + assert.NoError(t, execScheduler.Run(ctx, ctx, sampleContainers)) close(done) }() @@ -554,23 +1088,25 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { Value: expValue, } } - getDummyTrail := func(group string, emitIterations bool) stats.SampleContainer { + getDummyTrail := func(group string, emitIterations bool, addExpTags ...string) stats.SampleContainer { + expTags := []string{"group", group} + expTags = append(expTags, 
addExpTags...) return netext.NewDialer(net.Dialer{}).GetTrail(time.Now(), time.Now(), - true, emitIterations, getTags("group", group)) + true, emitIterations, getTags(expTags...)) } - // Initially give a long time (5s) for the executor to start + // Initially give a long time (5s) for the execScheduler to start expectIn(0, 5000, getSample(1, testCounter, "group", "::setup", "place", "setupBeforeSleep")) expectIn(900, 1100, getSample(2, testCounter, "group", "::setup", "place", "setupAfterSleep")) expectIn(0, 100, getDummyTrail("::setup", false)) - expectIn(0, 100, getSample(5, testCounter, "group", "", "place", "defaultBeforeSleep")) - expectIn(900, 1100, getSample(6, testCounter, "group", "", "place", "defaultAfterSleep")) - expectIn(0, 100, getDummyTrail("", true)) + expectIn(0, 100, getSample(5, testCounter, "group", "", "place", "defaultBeforeSleep", "scenario", "default")) + expectIn(900, 1100, getSample(6, testCounter, "group", "", "place", "defaultAfterSleep", "scenario", "default")) + expectIn(0, 100, getDummyTrail("", true, "scenario", "default")) - expectIn(0, 100, getSample(5, testCounter, "group", "", "place", "defaultBeforeSleep")) - expectIn(900, 1100, getSample(6, testCounter, "group", "", "place", "defaultAfterSleep")) - expectIn(0, 100, getDummyTrail("", true)) + expectIn(0, 100, getSample(5, testCounter, "group", "", "place", "defaultBeforeSleep", "scenario", "default")) + expectIn(900, 1100, getSample(6, testCounter, "group", "", "place", "defaultAfterSleep", "scenario", "default")) + expectIn(0, 100, getDummyTrail("", true, "scenario", "default")) expectIn(0, 1000, getSample(3, testCounter, "group", "::teardown", "place", "teardownBeforeSleep")) expectIn(900, 1100, getSample(4, testCounter, "group", "::teardown", "place", "teardownAfterSleep")) @@ -581,9 +1117,147 @@ func TestRealTimeAndSetupTeardownMetrics(t *testing.T) { case s := <-sampleContainers: t.Fatalf("Did not expect anything in the sample channel bug got %#v", s) case <-time.After(3 * 
time.Second): - t.Fatalf("Local executor took way to long to finish") + t.Fatalf("Local execScheduler took way to long to finish") case <-done: return // Exit normally } } } + +// Just a lib.PausableExecutor implementation that can return an error +type pausableExecutor struct { + lib.Executor + err error +} + +func (p pausableExecutor) SetPaused(bool) error { + return p.err +} + +func TestSetPaused(t *testing.T) { + t.Run("second pause is an error", func(t *testing.T) { + runner := &minirunner.MiniRunner{} + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + sched, err := NewExecutionScheduler(runner, logger) + require.NoError(t, err) + sched.executors = []lib.Executor{pausableExecutor{err: nil}} + + require.NoError(t, sched.SetPaused(true)) + err = sched.SetPaused(true) + require.Error(t, err) + require.Contains(t, err.Error(), "execution is already paused") + }) + + t.Run("unpause at the start is an error", func(t *testing.T) { + runner := &minirunner.MiniRunner{} + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + sched, err := NewExecutionScheduler(runner, logger) + require.NoError(t, err) + sched.executors = []lib.Executor{pausableExecutor{err: nil}} + err = sched.SetPaused(false) + require.Error(t, err) + require.Contains(t, err.Error(), "execution wasn't paused") + }) + + t.Run("second unpause is an error", func(t *testing.T) { + runner := &minirunner.MiniRunner{} + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + sched, err := NewExecutionScheduler(runner, logger) + require.NoError(t, err) + sched.executors = []lib.Executor{pausableExecutor{err: nil}} + require.NoError(t, sched.SetPaused(true)) + require.NoError(t, sched.SetPaused(false)) + err = sched.SetPaused(false) + require.Error(t, err) + require.Contains(t, err.Error(), "execution wasn't paused") + }) + + t.Run("an error on pausing is propagated", func(t *testing.T) { + runner := &minirunner.MiniRunner{} + logger := logrus.New() + 
logger.SetOutput(testutils.NewTestOutput(t)) + sched, err := NewExecutionScheduler(runner, logger) + require.NoError(t, err) + expectedErr := errors.New("testing pausable executor error") + sched.executors = []lib.Executor{pausableExecutor{err: expectedErr}} + err = sched.SetPaused(true) + require.Error(t, err) + require.Equal(t, err, expectedErr) + }) + + t.Run("can't pause unpausable executor", func(t *testing.T) { + runner := &minirunner.MiniRunner{} + options, err := executor.DeriveScenariosFromShortcuts(lib.Options{ + Iterations: null.IntFrom(2), + VUs: null.IntFrom(1), + }.Apply(runner.GetOptions())) + require.NoError(t, err) + require.NoError(t, runner.SetOptions(options)) + + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + sched, err := NewExecutionScheduler(runner, logger) + require.NoError(t, err) + err = sched.SetPaused(true) + require.Error(t, err) + require.Contains(t, err.Error(), "doesn't support pause and resume operations after its start") + }) +} + +func TestNewExecutionSchedulerHasWork(t *testing.T) { + t.Parallel() + script := []byte(` + import http from 'k6/http'; + + export let options = { + executionSegment: "3/4:1", + executionSegmentSequence: "0,1/4,2/4,3/4,1", + scenarios: { + shared_iters1: { + executor: "shared-iterations", + vus: 3, + iterations: 3, + }, + shared_iters2: { + executor: "shared-iterations", + vus: 4, + iterations: 4, + }, + constant_arr_rate: { + executor: "constant-arrival-rate", + rate: 3, + timeUnit: "1s", + duration: "20s", + preAllocatedVUs: 4, + maxVUs: 4, + }, + }, + }; + + export default function() { + const response = http.get("http://test.loadimpact.com"); + }; +`) + + runner, err := js.New( + &loader.SourceData{ + URL: &url.URL{Path: "/script.js"}, + Data: script, + }, + nil, + lib.RuntimeOptions{}, + ) + require.NoError(t, err) + + logger := logrus.New() + logger.SetOutput(testutils.NewTestOutput(t)) + + execScheduler, err := NewExecutionScheduler(runner, logger) + require.NoError(t, 
err) + + assert.Len(t, execScheduler.executors, 2) + assert.Len(t, execScheduler.executorConfigs, 3) +} diff --git a/core/local/util.go b/core/local/util.go deleted file mode 100644 index 612021af378..00000000000 --- a/core/local/util.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2016 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * - */ - -package local - -import ( - "time" - - "github.com/loadimpact/k6/lib" - "gopkg.in/guregu/null.v3" -) - -// Returns the VU count and whether to keep going at the specified time. -func ProcessStages(startVUs int64, stages []lib.Stage, t time.Duration) (null.Int, bool) { - vus := null.NewInt(startVUs, false) - - var start time.Duration - for _, stage := range stages { - // Infinite stages keep running forever, with the last valid end point, or its own target. - if !stage.Duration.Valid { - if stage.Target.Valid { - vus = stage.Target - } - return vus, true - } - - // If the stage has already ended, still record the end VU count for interpolation. - end := start + time.Duration(stage.Duration.Duration) - if end < t { - if stage.Target.Valid { - vus = stage.Target - } - start = end - continue - } - - // If there's a VU target, use linear interpolation to reach it. 
- if stage.Target.Valid { - prog := lib.Clampf(float64(t-start)/float64(stage.Duration.Duration), 0.0, 1.0) - vus = null.IntFrom(lib.Lerp(vus.Int64, stage.Target.Int64, prog)) - } - - // We found a stage, so keep running. - return vus, true - } - return vus, false -} diff --git a/core/local/util_test.go b/core/local/util_test.go index 1a0733cc022..6801c5b3620 100644 --- a/core/local/util_test.go +++ b/core/local/util_test.go @@ -20,16 +20,8 @@ package local -import ( - "testing" - "time" - - "github.com/loadimpact/k6/lib" - "github.com/loadimpact/k6/lib/types" - "github.com/stretchr/testify/assert" - null "gopkg.in/guregu/null.v3" -) - +//TODO: translate this test to the new paradigm +/* func TestProcessStages(t *testing.T) { type checkpoint struct { D time.Duration @@ -291,3 +283,4 @@ func TestProcessStages(t *testing.T) { }) } } +*/ diff --git a/js/bundle.go b/js/bundle.go index e25915831a2..58717f09e10 100644 --- a/js/bundle.go +++ b/js/bundle.go @@ -26,16 +26,16 @@ import ( "net/url" "runtime" - "github.com/loadimpact/k6/lib/consts" - "github.com/dop251/goja" + "github.com/pkg/errors" + "github.com/spf13/afero" + "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/js/compiler" jslib "github.com/loadimpact/k6/js/lib" "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/consts" "github.com/loadimpact/k6/loader" - "github.com/pkg/errors" - "github.com/spf13/afero" ) // A Bundle is a self-contained bundle of scripts and resources. @@ -49,14 +49,20 @@ type Bundle struct { BaseInitContext *InitContext Env map[string]string - CompatibilityMode compiler.CompatibilityMode + CompatibilityMode lib.CompatibilityMode + + exports map[string]goja.Callable } // A BundleInstance is a self-contained instance of a Bundle. type BundleInstance struct { Runtime *goja.Runtime Context *context.Context - Default goja.Callable + + //TODO: maybe just have a reference to the Bundle? or save and pass rtOpts? 
+ env map[string]string + + exports map[string]goja.Callable } // NewBundle creates a new bundle from a source file and a filesystem. @@ -83,49 +89,15 @@ func NewBundle(src *loader.SourceData, filesystems map[string]afero.Fs, rtOpts l filesystems, loader.Dir(src.URL)), Env: rtOpts.Env, CompatibilityMode: compatMode, + exports: make(map[string]goja.Callable), } if err := bundle.instantiate(rt, bundle.BaseInitContext); err != nil { return nil, err } - // Grab exports. - exportsV := rt.Get("exports") - if goja.IsNull(exportsV) || goja.IsUndefined(exportsV) { - return nil, errors.New("exports must be an object") - } - exports := exportsV.ToObject(rt) - - // Validate the default function. - def := exports.Get("default") - if def == nil || goja.IsNull(def) || goja.IsUndefined(def) { - return nil, errors.New("script must export a default function") - } - if _, ok := goja.AssertFunction(def); !ok { - return nil, errors.New("default export must be a function") - } - - // Extract/validate other exports. - for _, k := range exports.Keys() { - v := exports.Get(k) - switch k { - case "default": // Already checked above. 
- case "options": - data, err := json.Marshal(v.Export()) - if err != nil { - return nil, err - } - if err := json.Unmarshal(data, &bundle.Options); err != nil { - return nil, err - } - case "setup": - if _, ok := goja.AssertFunction(v); !ok { - return nil, errors.New("exported 'setup' must be a function") - } - case "teardown": - if _, ok := goja.AssertFunction(v); !ok { - return nil, errors.New("exported 'teardown' must be a function") - } - } + err = bundle.getExports(rt, true) + if err != nil { + return nil, err } return &bundle, nil @@ -137,7 +109,13 @@ func NewBundleFromArchive(arc *lib.Archive, rtOpts lib.RuntimeOptions) (*Bundle, return nil, errors.Errorf("expected bundle type 'js', got '%s'", arc.Type) } - compatMode, err := lib.ValidateCompatibilityMode(arc.CompatibilityMode) + compatModeStr := arc.CompatibilityMode + if rtOpts.CompatibilityMode.Valid { + // `k6 run --compatibility-mode=whatever archive.tar` should override + // whatever value is in the archive + compatModeStr = rtOpts.CompatibilityMode.String + } + compatMode, err := lib.ValidateCompatibilityMode(compatModeStr) if err != nil { return nil, err } @@ -147,8 +125,8 @@ func NewBundleFromArchive(arc *lib.Archive, rtOpts lib.RuntimeOptions) (*Bundle, if err != nil { return nil, err } - - initctx := NewInitContext(goja.New(), c, compatMode, + rt := goja.New() + initctx := NewInitContext(rt, c, compatMode, new(context.Context), arc.Filesystems, arc.PwdURL) env := arc.Env @@ -168,10 +146,20 @@ func NewBundleFromArchive(arc *lib.Archive, rtOpts lib.RuntimeOptions) (*Bundle, BaseInitContext: initctx, Env: env, CompatibilityMode: compatMode, + exports: make(map[string]goja.Callable), } - if err := bundle.instantiate(bundle.BaseInitContext.runtime, bundle.BaseInitContext); err != nil { + + if err = bundle.instantiate(rt, bundle.BaseInitContext); err != nil { + return nil, err + } + + // Grab exported objects, but avoid overwriting options, which would + // be initialized from the metadata.json at this 
point. + err = bundle.getExports(rt, false) + if err != nil { return nil, err } + return bundle, nil } @@ -196,8 +184,49 @@ func (b *Bundle) makeArchive() *lib.Archive { return arc } +// getExports validates and extracts exported objects +func (b *Bundle) getExports(rt *goja.Runtime, options bool) error { + exportsV := rt.Get("exports") + if goja.IsNull(exportsV) || goja.IsUndefined(exportsV) { + return errors.New("exports must be an object") + } + exports := exportsV.ToObject(rt) + + for _, k := range exports.Keys() { + v := exports.Get(k) + if fn, ok := goja.AssertFunction(v); ok && k != consts.Options { + b.exports[k] = fn + continue + } + switch k { + case consts.Options: + if !options { + continue + } + data, err := json.Marshal(v.Export()) + if err != nil { + return err + } + if err := json.Unmarshal(data, &b.Options); err != nil { + return err + } + case consts.SetupFn: + return errors.New("exported 'setup' must be a function") + case consts.TeardownFn: + return errors.New("exported 'teardown' must be a function") + } + } + + if len(b.exports) == 0 { + return errors.New("no exported functions in script") + } + + return nil +} + // Instantiate creates a new runtime from this bundle. func (b *Bundle) Instantiate() (bi *BundleInstance, instErr error) { + // TODO: actually use a real context here, so that the instantiation can be killed // Placeholder for a real context. ctxPtr := new(context.Context) @@ -209,11 +238,19 @@ func (b *Bundle) Instantiate() (bi *BundleInstance, instErr error) { return nil, err } - // Grab the default function; type is already checked in NewBundle(). + bi = &BundleInstance{ + Runtime: rt, + Context: ctxPtr, + exports: make(map[string]goja.Callable), + env: b.Env, + } + + // Grab any exported functions that could be executed. These were + // already pre-validated in NewBundle(), just get them here. 
exports := rt.Get("exports").ToObject(rt) - def, ok := goja.AssertFunction(exports.Get("default")) - if !ok || def == nil { - panic("exported default is not a function") + for k := range b.exports { + fn, _ := goja.AssertFunction(exports.Get(k)) + bi.exports[k] = fn } jsOptions := rt.Get("options") @@ -230,11 +267,7 @@ func (b *Bundle) Instantiate() (bi *BundleInstance, instErr error) { } }) - return &BundleInstance{ - Runtime: rt, - Context: ctxPtr, - Default: def, - }, instErr + return bi, instErr } // Instantiates the bundle into an existing runtime. Not public because it also messes with a bunch @@ -243,7 +276,7 @@ func (b *Bundle) instantiate(rt *goja.Runtime, init *InitContext) error { rt.SetFieldNameMapper(common.FieldNameMapper{}) rt.SetRandSource(common.NewRandSource()) - if init.compatibilityMode == compiler.CompatibilityModeExtended { + if init.compatibilityMode == lib.CompatibilityModeExtended { if _, err := rt.RunProgram(jslib.GetCoreJS()); err != nil { return err } diff --git a/js/bundle_test.go b/js/bundle_test.go index b6b76a509ef..ec8d517be64 100644 --- a/js/bundle_test.go +++ b/js/bundle_test.go @@ -33,16 +33,16 @@ import ( "time" "github.com/dop251/goja" - "github.com/loadimpact/k6/js/compiler" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/consts" "github.com/loadimpact/k6/lib/fsext" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/loader" - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" ) const isWindows = runtime.GOOS == "windows" @@ -73,7 +73,7 @@ func getSimpleBundle(filename, data string, opts ...interface{}) (*Bundle, error func TestNewBundle(t *testing.T) { t.Run("Blank", func(t *testing.T) { _, err := getSimpleBundle("/script.js", "") - assert.EqualError(t, err, "script must export a 
default function") + assert.EqualError(t, err, "no exported functions in script") }) t.Run("Invalid", func(t *testing.T) { _, err := getSimpleBundle("/script.js", "\x00") @@ -90,15 +90,15 @@ func TestNewBundle(t *testing.T) { }) t.Run("DefaultUndefined", func(t *testing.T) { _, err := getSimpleBundle("/script.js", `export default undefined;`) - assert.EqualError(t, err, "script must export a default function") + assert.EqualError(t, err, "no exported functions in script") }) t.Run("DefaultNull", func(t *testing.T) { _, err := getSimpleBundle("/script.js", `export default null;`) - assert.EqualError(t, err, "script must export a default function") + assert.EqualError(t, err, "no exported functions in script") }) t.Run("DefaultWrongType", func(t *testing.T) { _, err := getSimpleBundle("/script.js", `export default 12345;`) - assert.EqualError(t, err, "default export must be a function") + assert.EqualError(t, err, "no exported functions in script") }) t.Run("Minimal", func(t *testing.T) { _, err := getSimpleBundle("/script.js", `export default function() {};`) @@ -114,14 +114,16 @@ func TestNewBundle(t *testing.T) { t.Run("CompatibilityMode", func(t *testing.T) { t.Run("Extended/ok/CoreJS", func(t *testing.T) { rtOpts := lib.RuntimeOptions{ - CompatibilityMode: null.StringFrom(compiler.CompatibilityModeExtended.String())} + CompatibilityMode: null.StringFrom(lib.CompatibilityModeExtended.String()), + } _, err := getSimpleBundle("/script.js", `export default function() {}; new Set([1, 2, 3, 2, 1]);`, rtOpts) assert.NoError(t, err) }) t.Run("Base/ok/Minimal", func(t *testing.T) { rtOpts := lib.RuntimeOptions{ - CompatibilityMode: null.StringFrom(compiler.CompatibilityModeBase.String())} + CompatibilityMode: null.StringFrom(lib.CompatibilityModeBase.String()), + } _, err := getSimpleBundle("/script.js", `module.exports.default = function() {};`, rtOpts) assert.NoError(t, err) @@ -133,19 +135,27 @@ func TestNewBundle(t *testing.T) { code string expErr string }{ - 
{"InvalidCompat", "es1", `export default function() {};`, - `invalid compatibility mode "es1". Use: "extended", "base"`}, + { + "InvalidCompat", "es1", `export default function() {};`, + `invalid compatibility mode "es1". Use: "extended", "base"`, + }, // ES2015 modules are not supported - {"Modules", "base", `export default function() {};`, - "file:///script.js: Line 1:1 Unexpected reserved word"}, + { + "Modules", "base", `export default function() {};`, + "file:///script.js: Line 1:1 Unexpected reserved word", + }, // Arrow functions are not supported - {"ArrowFuncs", "base", + { + "ArrowFuncs", "base", `module.exports.default = function() {}; () => {};`, - "file:///script.js: Line 1:42 Unexpected token ) (and 1 more errors)"}, + "file:///script.js: Line 1:42 Unexpected token ) (and 1 more errors)", + }, // ES2015 objects polyfilled by core.js are not supported - {"CoreJS", "base", + { + "CoreJS", "base", `module.exports.default = function() {}; new Set([1, 2, 3, 2, 1]);`, - "ReferenceError: Set is not defined at file:///script.js:1:45(5)"}, + "ReferenceError: Set is not defined at file:///script.js:1:45(5)", + }, } for _, tc := range testCases { @@ -206,17 +216,6 @@ func TestNewBundle(t *testing.T) { assert.Equal(t, null.IntFrom(100), b.Options.VUs) } }) - t.Run("VUsMax", func(t *testing.T) { - b, err := getSimpleBundle("/script.js", ` - export let options = { - vusMax: 100, - }; - export default function() {}; - `) - if assert.NoError(t, err) { - assert.Equal(t, null.IntFrom(100), b.Options.VUsMax) - } - }) t.Run("Duration", func(t *testing.T) { b, err := getSimpleBundle("/script.js", ` export let options = { @@ -397,7 +396,6 @@ func TestNewBundle(t *testing.T) { assert.Equal(t, b.Options.TLSVersion.Min, lib.TLSVersion(tls.VersionSSL30)) assert.Equal(t, b.Options.TLSVersion.Max, lib.TLSVersion(tls.VersionSSL30)) } - }) }) t.Run("Thresholds", func(t *testing.T) { @@ -418,88 +416,127 @@ func TestNewBundle(t *testing.T) { }) } +func getArchive(data string, rtOpts 
lib.RuntimeOptions) (*lib.Archive, error) { + b, err := getSimpleBundle("script.js", data, rtOpts) + if err != nil { + return nil, err + } + return b.makeArchive(), nil +} + func TestNewBundleFromArchive(t *testing.T) { - getArchive := func(data string, rtOpts lib.RuntimeOptions) (*lib.Archive, error) { - b, err := getSimpleBundle("script.js", data, rtOpts) - if err != nil { - return nil, err - } - return b.makeArchive(), nil + t.Parallel() + + es5Code := `module.exports.options = { vus: 12345 }; module.exports.default = function() { return "hi!" };` + es6Code := `export let options = { vus: 12345 }; export default function() { return "hi!"; };` + baseCompatModeRtOpts := lib.RuntimeOptions{CompatibilityMode: null.StringFrom(lib.CompatibilityModeBase.String())} + extCompatModeRtOpts := lib.RuntimeOptions{CompatibilityMode: null.StringFrom(lib.CompatibilityModeExtended.String())} + + checkBundle := func(t *testing.T, b *Bundle) { + assert.Equal(t, lib.Options{VUs: null.IntFrom(12345)}, b.Options) + bi, err := b.Instantiate() + require.NoError(t, err) + val, err := bi.exports["default"](goja.Undefined()) + require.NoError(t, err) + assert.Equal(t, "hi!", val.Export()) } - t.Run("ok", func(t *testing.T) { - testCases := []struct { - compatMode, code string - }{ - // An empty value will assume "extended" - {"", ` - export let options = { vus: 12345 }; - export default function() { return "hi!"; };`}, - {compiler.CompatibilityModeExtended.String(), ` - export let options = { vus: 12345 }; - export default function() { return "hi!"; };`}, - {compiler.CompatibilityModeBase.String(), ` - module.exports.options = { vus: 12345 }; - module.exports.default = function() { return "hi!" 
};`}, + checkArchive := func(t *testing.T, arc *lib.Archive, rtOpts lib.RuntimeOptions, expError string) { + b, err := NewBundleFromArchive(arc, rtOpts) + if expError != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), expError) + } else { + require.NoError(t, err) + checkBundle(t, b) } + } - for _, tc := range testCases { - tc := tc - t.Run(tc.compatMode, func(t *testing.T) { - rtOpts := lib.RuntimeOptions{CompatibilityMode: null.StringFrom(tc.compatMode)} - arc, err := getArchive(tc.code, rtOpts) - assert.NoError(t, err) - b, err := NewBundleFromArchive(arc, rtOpts) - if !assert.NoError(t, err) { - return - } - assert.Equal(t, lib.Options{VUs: null.IntFrom(12345)}, b.Options) - expCM := tc.compatMode - if expCM == "" { - expCM = compiler.CompatibilityModeExtended.String() - } - assert.Equal(t, expCM, b.CompatibilityMode.String()) + t.Run("es6_script_default", func(t *testing.T) { + t.Parallel() + arc, err := getArchive(es6Code, lib.RuntimeOptions{}) // default options + require.NoError(t, err) + require.Equal(t, lib.CompatibilityModeExtended.String(), arc.CompatibilityMode) - bi, err := b.Instantiate() - if !assert.NoError(t, err) { - return - } - val, err := bi.Default(goja.Undefined()) - if !assert.NoError(t, err) { - return - } - assert.Equal(t, "hi!", val.Export()) - }) - } + checkArchive(t, arc, lib.RuntimeOptions{}, "") // default options + checkArchive(t, arc, extCompatModeRtOpts, "") + checkArchive(t, arc, baseCompatModeRtOpts, "Unexpected reserved word") }) - t.Run("err", func(t *testing.T) { - testCases := []struct { - compatMode, code, expErr string - }{ - // Incompatible mode - {compiler.CompatibilityModeBase.String(), ` - export let options = { vus: 12345 }; - export default function() { return "hi!"; };`, - "file://script.js: Line 2:5 Unexpected reserved word (and 2 more errors)"}, - {"wrongcompat", ` - export let options = { vus: 12345 }; - export default function() { return "hi!"; };`, - `invalid compatibility mode "wrongcompat". 
Use: "extended", "base"`}, - } - for _, tc := range testCases { - tc := tc - t.Run(tc.compatMode, func(t *testing.T) { - rtOpts := lib.RuntimeOptions{CompatibilityMode: null.StringFrom(tc.compatMode)} - _, err := getArchive(tc.code, rtOpts) - assert.EqualError(t, err, tc.expErr) - }) + t.Run("es6_script_explicit", func(t *testing.T) { + t.Parallel() + arc, err := getArchive(es6Code, extCompatModeRtOpts) + require.NoError(t, err) + require.Equal(t, lib.CompatibilityModeExtended.String(), arc.CompatibilityMode) + + checkArchive(t, arc, lib.RuntimeOptions{}, "") + checkArchive(t, arc, extCompatModeRtOpts, "") + checkArchive(t, arc, baseCompatModeRtOpts, "Unexpected reserved word") + }) + + t.Run("es5_script_with_extended", func(t *testing.T) { + t.Parallel() + arc, err := getArchive(es5Code, lib.RuntimeOptions{}) + require.NoError(t, err) + require.Equal(t, lib.CompatibilityModeExtended.String(), arc.CompatibilityMode) + + checkArchive(t, arc, lib.RuntimeOptions{}, "") + checkArchive(t, arc, extCompatModeRtOpts, "") + checkArchive(t, arc, baseCompatModeRtOpts, "") + }) + + t.Run("es5_script", func(t *testing.T) { + t.Parallel() + arc, err := getArchive(es5Code, baseCompatModeRtOpts) + require.NoError(t, err) + require.Equal(t, lib.CompatibilityModeBase.String(), arc.CompatibilityMode) + + checkArchive(t, arc, lib.RuntimeOptions{}, "") + checkArchive(t, arc, extCompatModeRtOpts, "") + checkArchive(t, arc, baseCompatModeRtOpts, "") + }) + + t.Run("es6_archive_with_wrong_compat_mode", func(t *testing.T) { + t.Parallel() + arc, err := getArchive(es6Code, baseCompatModeRtOpts) + require.Error(t, err) + require.Nil(t, arc) + }) + + t.Run("messed_up_archive", func(t *testing.T) { + t.Parallel() + arc, err := getArchive(es6Code, extCompatModeRtOpts) + require.NoError(t, err) + arc.CompatibilityMode = "blah" // intentionally break the archive + checkArchive(t, arc, lib.RuntimeOptions{}, "invalid compatibility mode") // fails when it uses the archive one + checkArchive(t, arc, 
extCompatModeRtOpts, "") // works when I force the compat mode + checkArchive(t, arc, baseCompatModeRtOpts, "Unexpected reserved word") // failes because of ES6 + }) + + t.Run("script_options_dont_overwrite_metadata", func(t *testing.T) { + t.Parallel() + code := `export let options = { vus: 12345 }; export default function() { return options.vus; };` + arc := &lib.Archive{ + Type: "js", + FilenameURL: &url.URL{Scheme: "file", Path: "/script"}, + K6Version: consts.Version, + Data: []byte(code), + Options: lib.Options{VUs: null.IntFrom(999)}, + PwdURL: &url.URL{Scheme: "file", Path: "/"}, + Filesystems: nil, } + b, err := NewBundleFromArchive(arc, lib.RuntimeOptions{}) + require.NoError(t, err) + bi, err := b.Instantiate() + require.NoError(t, err) + val, err := bi.exports[consts.DefaultFn](goja.Undefined()) + require.NoError(t, err) + assert.Equal(t, int64(999), val.Export()) }) } func TestOpen(t *testing.T) { - var testCases = [...]struct { + testCases := [...]struct { name string openPath string pwd string @@ -599,8 +636,8 @@ func TestOpen(t *testing.T) { for _, tCase := range testCases { tCase := tCase - var testFunc = func(t *testing.T) { - var openPath = tCase.openPath + testFunc := func(t *testing.T) { + openPath := tCase.openPath // if fullpath prepend prefix if openPath != "" && (openPath[0] == '/' || openPath[0] == '\\') { openPath = filepath.Join(prefix, openPath) @@ -608,7 +645,7 @@ func TestOpen(t *testing.T) { if isWindows { openPath = strings.Replace(openPath, `\`, `\\`, -1) } - var pwd = tCase.pwd + pwd := tCase.pwd if pwd == "" { pwd = "/path/to/" } @@ -632,7 +669,7 @@ func TestOpen(t *testing.T) { t.Run(source, func(t *testing.T) { bi, err := b.Instantiate() require.NoError(t, err) - v, err := bi.Default(goja.Undefined()) + v, err := bi.exports["default"](goja.Undefined()) require.NoError(t, err) assert.Equal(t, "hi", v.Export()) }) @@ -670,7 +707,7 @@ func TestBundleInstantiate(t *testing.T) { } t.Run("Run", func(t *testing.T) { - v, err := 
bi.Default(goja.Undefined()) + v, err := bi.exports["default"](goja.Undefined()) if assert.NoError(t, err) { assert.Equal(t, true, v.Export()) } @@ -678,7 +715,7 @@ func TestBundleInstantiate(t *testing.T) { t.Run("SetAndRun", func(t *testing.T) { bi.Runtime.Set("val", false) - v, err := bi.Default(goja.Undefined()) + v, err := bi.exports["default"](goja.Undefined()) if assert.NoError(t, err) { assert.Equal(t, false, v.Export()) } @@ -733,7 +770,7 @@ func TestBundleEnv(t *testing.T) { bi, err := b.Instantiate() if assert.NoError(t, err) { - _, err := bi.Default(goja.Undefined()) + _, err := bi.exports["default"](goja.Undefined()) assert.NoError(t, err) } }) @@ -774,7 +811,7 @@ func TestBundleNotSharable(t *testing.T) { require.NoError(t, err) for j := 0; j < iters; j++ { bi.Runtime.Set("__ITER", j) - _, err := bi.Default(goja.Undefined()) + _, err := bi.exports["default"](goja.Undefined()) assert.NoError(t, err) } } @@ -784,22 +821,26 @@ func TestBundleNotSharable(t *testing.T) { func TestBundleMakeArchive(t *testing.T) { testCases := []struct { - cm compiler.CompatibilityMode + cm lib.CompatibilityMode script string exclaim string }{ - {compiler.CompatibilityModeExtended, ` + { + lib.CompatibilityModeExtended, ` import exclaim from "./exclaim.js"; export let options = { vus: 12345 }; export let file = open("./file.txt"); export default function() { return exclaim(file); };`, - `export default function(s) { return s + "!" };`}, - {compiler.CompatibilityModeBase, ` + `export default function(s) { return s + "!" };`, + }, + { + lib.CompatibilityModeBase, ` var exclaim = require("./exclaim.js"); module.exports.options = { vus: 12345 }; module.exports.file = open("./file.txt"); module.exports.default = function() { return exclaim(module.exports.file); };`, - `module.exports.default = function(s) { return s + "!" };`}, + `module.exports.default = function(s) { return s + "!" 
};`, + }, } for _, tc := range testCases { diff --git a/js/common/bridge_test.go b/js/common/bridge_test.go index be2fef15ff4..f31ddc77284 100644 --- a/js/common/bridge_test.go +++ b/js/common/bridge_test.go @@ -324,7 +324,7 @@ func TestBind(t *testing.T) { {"Methods", bridgeTestMethodsType{}, func(t *testing.T, obj interface{}, rt *goja.Runtime) { t.Run("unexportedFn", func(t *testing.T) { _, err := RunString(rt, `obj.unexportedFn()`) - assert.EqualError(t, err, "TypeError: Object has no member 'unexportedFn' at :1:30(3)") + assert.EqualError(t, err, "TypeError: Object has no member 'unexportedFn' at :1:17(3)") }) t.Run("ExportedFn", func(t *testing.T) { _, err := RunString(rt, `obj.exportedFn()`) @@ -332,7 +332,7 @@ func TestBind(t *testing.T) { }) t.Run("unexportedPtrFn", func(t *testing.T) { _, err := RunString(rt, `obj.unexportedPtrFn()`) - assert.EqualError(t, err, "TypeError: Object has no member 'unexportedPtrFn' at :1:33(3)") + assert.EqualError(t, err, "TypeError: Object has no member 'unexportedPtrFn' at :1:20(3)") }) t.Run("ExportedPtrFn", func(t *testing.T) { _, err := RunString(rt, `obj.exportedPtrFn()`) @@ -340,7 +340,7 @@ func TestBind(t *testing.T) { case *bridgeTestMethodsType: assert.NoError(t, err) case bridgeTestMethodsType: - assert.EqualError(t, err, "TypeError: Object has no member 'exportedPtrFn' at :1:31(3)") + assert.EqualError(t, err, "TypeError: Object has no member 'exportedPtrFn' at :1:18(3)") default: assert.Fail(t, "INVALID TYPE") } @@ -527,7 +527,7 @@ func TestBind(t *testing.T) { _, err := RunString(rt, `obj.contextInject()`) switch impl := obj.(type) { case bridgeTestContextInjectType: - assert.EqualError(t, err, "TypeError: Object has no member 'contextInject' at :1:31(3)") + assert.EqualError(t, err, "TypeError: Object has no member 'contextInject' at :1:18(3)") case *bridgeTestContextInjectType: assert.EqualError(t, err, "GoError: contextInject() can only be called from within default()") assert.Equal(t, nil, impl.ctx) @@ 
-546,7 +546,7 @@ func TestBind(t *testing.T) { _, err := RunString(rt, `obj.contextInjectPtr()`) switch impl := obj.(type) { case bridgeTestContextInjectPtrType: - assert.EqualError(t, err, "TypeError: Object has no member 'contextInjectPtr' at :1:34(3)") + assert.EqualError(t, err, "TypeError: Object has no member 'contextInjectPtr' at :1:21(3)") case *bridgeTestContextInjectPtrType: assert.NoError(t, err) assert.Equal(t, ctxPtr, impl.ctxPtr) @@ -566,7 +566,7 @@ func TestBind(t *testing.T) { } case bridgeTestCounterType: _, err := RunString(rt, `obj.count()`) - assert.EqualError(t, err, "TypeError: Object has no member 'count' at :1:23(3)") + assert.EqualError(t, err, "TypeError: Object has no member 'count' at :1:10(3)") default: assert.Fail(t, "UNKNOWN TYPE") } @@ -702,6 +702,7 @@ func BenchmarkProxy(b *testing.B) { } }}, {"AddError", "addWithError", bridgeTestAddWithErrorType{}, func(b *testing.B, fn interface{}) { + b.Skip() f := fn.(func(int, int) int) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -709,6 +710,7 @@ func BenchmarkProxy(b *testing.B) { } }}, {"Context", "context", bridgeTestContextType{}, func(b *testing.B, fn interface{}) { + b.Skip() f := fn.(func()) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -716,6 +718,7 @@ func BenchmarkProxy(b *testing.B) { } }}, {"ContextAdd", "contextAdd", bridgeTestContextAddType{}, func(b *testing.B, fn interface{}) { + b.Skip() f := fn.(func(int, int) int) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -723,6 +726,7 @@ func BenchmarkProxy(b *testing.B) { } }}, {"ContextAddError", "contextAddWithError", bridgeTestContextAddWithErrorType{}, func(b *testing.B, fn interface{}) { + b.Skip() f := fn.(func(int, int) int) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -737,6 +741,7 @@ func BenchmarkProxy(b *testing.B) { } }}, {"SumContext", "sumWithContext", bridgeTestSumWithContextType{}, func(b *testing.B, fn interface{}) { + b.Skip() f := fn.(func(...int) int) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -744,6 +749,7 @@ func 
BenchmarkProxy(b *testing.B) { } }}, {"SumError", "sumWithError", bridgeTestSumWithErrorType{}, func(b *testing.B, fn interface{}) { + b.Skip() f := fn.(func(...int) int) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -751,6 +757,7 @@ func BenchmarkProxy(b *testing.B) { } }}, {"SumContextError", "sumWithContextAndError", bridgeTestSumWithContextAndErrorType{}, func(b *testing.B, fn interface{}) { + b.Skip() f := fn.(func(...int) int) b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/js/common/init_error.go b/js/common/init_error.go index 8cfd53b644e..59237c90dee 100644 --- a/js/common/init_error.go +++ b/js/common/init_error.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package common // InitContextError is an error that happened during the a test init context diff --git a/js/common/util.go b/js/common/util.go index 397633bed45..dc66e8c954e 100644 --- a/js/common/util.go +++ b/js/common/util.go @@ -22,18 +22,24 @@ package common import ( "github.com/dop251/goja" + "github.com/loadimpact/k6/js/compiler" ) -// Runs an ES6 string in the given runtime. Use this rather than writing ES5 in tests. +// RunString Runs an string in the given runtime. Use this if writing ES5 in tests isn't a problem. 
func RunString(rt *goja.Runtime, src string) (goja.Value, error) { + return rt.RunString(src) +} + +// RunES6String Runs an ES6 string in the given runtime. Use this rather than writing ES5 in tests. +func RunES6String(rt *goja.Runtime, src string) (goja.Value, error) { var err error c := compiler.New() src, _, err = c.Transform(src, "__string__") if err != nil { return goja.Undefined(), err } - return rt.RunString(src) + return RunString(rt, src) } // Throws a JS error; avoids re-wrapping GoErrors. diff --git a/js/common/util_test.go b/js/common/util_test.go index f99c242ef15..3d35d418f0c 100644 --- a/js/common/util_test.go +++ b/js/common/util_test.go @@ -30,11 +30,11 @@ import ( func TestRunString(t *testing.T) { t.Run("Valid", func(t *testing.T) { - _, err := RunString(goja.New(), `let a = 1;`) + _, err := RunES6String(goja.New(), `let a = 1;`) assert.NoError(t, err) }) t.Run("Invalid", func(t *testing.T) { - _, err := RunString(goja.New(), `let a = #;`) + _, err := RunES6String(goja.New(), `let a = #;`) assert.NotNil(t, err) assert.Contains(t, err.Error(), "SyntaxError: __string__: Unexpected character '#' (1:8)\n> 1 | let a = #;\n") }) diff --git a/js/compiler/compiler.go b/js/compiler/compiler.go index 3f7b5313012..934b3fcd80d 100644 --- a/js/compiler/compiler.go +++ b/js/compiler/compiler.go @@ -31,6 +31,8 @@ import ( "github.com/dop251/goja/parser" "github.com/mitchellh/mapstructure" "github.com/sirupsen/logrus" + + "github.com/loadimpact/k6/lib" ) var ( @@ -48,18 +50,6 @@ var ( globalBabel *babel // nolint:gochecknoglobals ) -// CompatibilityMode specifies the JS compatibility mode -// nolint:lll -//go:generate enumer -type=CompatibilityMode -transform=snake -trimprefix CompatibilityMode -output compatibility_mode_gen.go -type CompatibilityMode uint8 - -const ( - // CompatibilityModeExtended achieves ES6+ compatibility with Babel and core.js - CompatibilityModeExtended CompatibilityMode = iota + 1 - // CompatibilityModeBase is standard goja ES5.1+ - 
CompatibilityModeBase -) - // A Compiler compiles JavaScript source code (ES5.1 or ES6) into a goja.Program type Compiler struct{} @@ -80,11 +70,11 @@ func (c *Compiler) Transform(src, filename string) (code string, srcmap *SourceM // Compile the program in the given CompatibilityMode, optionally running pre and post code. func (c *Compiler) Compile(src, filename, pre, post string, - strict bool, compatMode CompatibilityMode) (*goja.Program, string, error) { + strict bool, compatMode lib.CompatibilityMode) (*goja.Program, string, error) { code := pre + src + post ast, err := parser.ParseFile(nil, filename, code, 0) if err != nil { - if compatMode == CompatibilityModeExtended { + if compatMode == lib.CompatibilityModeExtended { code, _, err = c.Transform(src, filename) if err != nil { return nil, code, err @@ -101,7 +91,7 @@ type babel struct { vm *goja.Runtime this goja.Value transform goja.Callable - mutex sync.Mutex //TODO: cache goja.CompileAST() in an init() function? + mutex sync.Mutex // TODO: cache goja.CompileAST() in an init() function? 
} func newBabel() (*babel, error) { diff --git a/js/compiler/compiler_test.go b/js/compiler/compiler_test.go index add1625cbea..b8d62c572d1 100644 --- a/js/compiler/compiler_test.go +++ b/js/compiler/compiler_test.go @@ -25,6 +25,8 @@ import ( "github.com/dop251/goja" "github.com/stretchr/testify/assert" + + "github.com/loadimpact/k6/lib" ) func TestTransform(t *testing.T) { @@ -71,7 +73,7 @@ func TestCompile(t *testing.T) { c := New() t.Run("ES5", func(t *testing.T) { src := `1+(function() { return 2; })()` - pgm, code, err := c.Compile(src, "script.js", "", "", true, CompatibilityModeBase) + pgm, code, err := c.Compile(src, "script.js", "", "", true, lib.CompatibilityModeBase) if !assert.NoError(t, err) { return } @@ -83,7 +85,7 @@ func TestCompile(t *testing.T) { t.Run("Wrap", func(t *testing.T) { pgm, code, err := c.Compile(src, "script.js", - "(function(){return ", "})", true, CompatibilityModeBase) + "(function(){return ", "})", true, lib.CompatibilityModeBase) if !assert.NoError(t, err) { return } @@ -102,14 +104,14 @@ func TestCompile(t *testing.T) { t.Run("Invalid", func(t *testing.T) { src := `1+(function() { return 2; )()` - _, _, err := c.Compile(src, "script.js", "", "", true, CompatibilityModeExtended) + _, _, err := c.Compile(src, "script.js", "", "", true, lib.CompatibilityModeExtended) assert.IsType(t, &goja.Exception{}, err) assert.Contains(t, err.Error(), `SyntaxError: script.js: Unexpected token (1:26) > 1 | 1+(function() { return 2; )()`) }) }) t.Run("ES6", func(t *testing.T) { - pgm, code, err := c.Compile(`1+(()=>2)()`, "script.js", "", "", true, CompatibilityModeExtended) + pgm, code, err := c.Compile(`1+(()=>2)()`, "script.js", "", "", true, lib.CompatibilityModeExtended) if !assert.NoError(t, err) { return } @@ -120,7 +122,7 @@ func TestCompile(t *testing.T) { } t.Run("Wrap", func(t *testing.T) { - pgm, code, err := c.Compile(`fn(1+(()=>2)())`, "script.js", "(function(fn){", "})", true, CompatibilityModeExtended) + pgm, code, err := 
c.Compile(`fn(1+(()=>2)())`, "script.js", "(function(fn){", "})", true, lib.CompatibilityModeExtended) if !assert.NoError(t, err) { return } @@ -141,7 +143,7 @@ func TestCompile(t *testing.T) { }) t.Run("Invalid", func(t *testing.T) { - _, _, err := c.Compile(`1+(=>2)()`, "script.js", "", "", true, CompatibilityModeExtended) + _, _, err := c.Compile(`1+(=>2)()`, "script.js", "", "", true, lib.CompatibilityModeExtended) assert.IsType(t, &goja.Exception{}, err) assert.Contains(t, err.Error(), `SyntaxError: script.js: Unexpected token (1:3) > 1 | 1+(=>2)()`) diff --git a/js/console_test.go b/js/console_test.go index aa0e137a51d..159e329f1ae 100644 --- a/js/console_test.go +++ b/js/console_test.go @@ -33,7 +33,7 @@ import ( logtest "github.com/sirupsen/logrus/hooks/test" "github.com/spf13/afero" "github.com/stretchr/testify/assert" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/lib" @@ -70,29 +70,29 @@ func TestConsoleContext(t *testing.T) { assert.Equal(t, "b", entry.Message) } } -func getSimpleRunner(path, data string) (*Runner, error) { - return getSimpleRunnerWithFileFs(path, data, afero.NewMemMapFs()) -} - -func getSimpleRunnerWithOptions(path, data string, options lib.RuntimeOptions) (*Runner, error) { - return New(&loader.SourceData{ - URL: &url.URL{Path: path, Scheme: "file"}, - Data: []byte(data), - }, map[string]afero.Fs{ - "file": afero.NewMemMapFs(), - "https": afero.NewMemMapFs()}, - options) +func getSimpleRunner(filename, data string, opts ...interface{}) (*Runner, error) { + var ( + fs = afero.NewMemMapFs() + rtOpts = lib.RuntimeOptions{CompatibilityMode: null.NewString("base", true)} + ) + for _, o := range opts { + switch opt := o.(type) { + case afero.Fs: + fs = opt + case lib.RuntimeOptions: + rtOpts = opt + } + } + return New( + &loader.SourceData{ + URL: &url.URL{Path: filename, Scheme: "file"}, + Data: []byte(data), + }, + map[string]afero.Fs{"file": fs, "https": 
afero.NewMemMapFs()}, + rtOpts, + ) } -func getSimpleRunnerWithFileFs(path, data string, fileFs afero.Fs) (*Runner, error) { - return New(&loader.SourceData{ - URL: &url.URL{Path: path, Scheme: "file"}, - Data: []byte(data), - }, map[string]afero.Fs{ - "file": fileFs, - "https": afero.NewMemMapFs()}, - lib.RuntimeOptions{}) -} func TestConsole(t *testing.T) { levels := map[string]logrus.Level{ "log": logrus.InfoLevel, @@ -117,20 +117,25 @@ func TestConsole(t *testing.T) { args, result := args, result t.Run(args, func(t *testing.T) { r, err := getSimpleRunner("/script.js", fmt.Sprintf( - `export default function() { console.%s(%s); }`, + `exports.default = function() { console.%s(%s); }`, name, args, )) assert.NoError(t, err) samples := make(chan stats.SampleContainer, 100) - vu, err := r.newVU(samples) + initVU, err := r.newVU(1, samples) assert.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + logger, hook := logtest.NewNullLogger() logger.Level = logrus.DebugLevel - vu.Console.Logger = logger + jsVU := vu.(*ActiveVU) + jsVU.Console.Logger = logger - err = vu.RunOnce(context.Background()) + err = vu.RunOnce() assert.NoError(t, err) entry := hook.LastEntry() @@ -204,7 +209,7 @@ func TestFileConsole(t *testing.T) { } r, err := getSimpleRunner("/script", fmt.Sprintf( - `export default function() { console.%s(%s); }`, + `exports.default = function() { console.%s(%s); }`, name, args, )) assert.NoError(t, err) @@ -215,13 +220,17 @@ func TestFileConsole(t *testing.T) { assert.NoError(t, err) samples := make(chan stats.SampleContainer, 100) - vu, err := r.newVU(samples) + initVU, err := r.newVU(1, samples) assert.NoError(t, err) - vu.Console.Logger.Level = logrus.DebugLevel - hook := logtest.NewLocal(vu.Console.Logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + jsVU 
:= vu.(*ActiveVU) + jsVU.Console.Logger.Level = logrus.DebugLevel + hook := logtest.NewLocal(jsVU.Console.Logger) - err = vu.RunOnce(context.Background()) + err = vu.RunOnce() assert.NoError(t, err) // Test if the file was created. diff --git a/js/empty_iteartions_bench_test.go b/js/empty_iteartions_bench_test.go new file mode 100644 index 00000000000..a38a91db218 --- /dev/null +++ b/js/empty_iteartions_bench_test.go @@ -0,0 +1,40 @@ +package js + +import ( + "context" + "testing" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/stats" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func BenchmarkEmptyIteration(b *testing.B) { + b.StopTimer() + + r, err := getSimpleRunner("/script.js", `exports.default = function() { }`) + if !assert.NoError(b, err) { + return + } + require.NoError(b, err) + + var ch = make(chan stats.SampleContainer, 100) + defer close(ch) + go func() { // read the channel so it doesn't block + for range ch { + } + }() + initVU, err := r.NewVU(1, ch) + if !assert.NoError(b, err) { + return + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + b.StartTimer() + for i := 0; i < b.N; i++ { + err = vu.RunOnce() + assert.NoError(b, err) + } +} diff --git a/js/http_bench_test.go b/js/http_bench_test.go index 413bc317e27..e3a828fb6fc 100644 --- a/js/http_bench_test.go +++ b/js/http_bench_test.go @@ -1,14 +1,36 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package js import ( "context" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/testutils/httpmultibin" "github.com/loadimpact/k6/stats" - "github.com/stretchr/testify/assert" - "gopkg.in/guregu/null.v3" ) func BenchmarkHTTPRequests(b *testing.B) { @@ -23,30 +45,78 @@ func BenchmarkHTTPRequests(b *testing.B) { let res = http.get(url + "/cookies/set?k2=v2&k1=v1"); if (res.status != 200) { throw new Error("wrong status: " + res.status) } } + `), lib.RuntimeOptions{CompatibilityMode: null.StringFrom("extended")}) + if !assert.NoError(b, err) { + return + } + err = r.SetOptions(lib.Options{ + Throw: null.BoolFrom(true), + MaxRedirects: null.IntFrom(10), + Hosts: tb.Dialer.Hosts, + NoCookiesReset: null.BoolFrom(true), + }) + require.NoError(b, err) + + var ch = make(chan stats.SampleContainer, 100) + defer close(ch) + go func() { // read the channel so it doesn't block + for range ch { + } + }() + initVU, err := r.NewVU(1, ch) + if !assert.NoError(b, err) { + return + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + b.StartTimer() + for i := 0; i < b.N; i++ { + err = vu.RunOnce() + assert.NoError(b, err) + } +} + +func BenchmarkHTTPRequestsBase(b *testing.B) { + b.StopTimer() + tb := httpmultibin.NewHTTPMultiBin(b) + defer tb.Cleanup() + + r, err := getSimpleRunner("/script.js", tb.Replacer.Replace(` + var http = require("k6/http"); + exports.default = 
function() { + var url = "HTTPBIN_URL"; + var res = http.get(url + "/cookies/set?k2=v2&k1=v1"); + if (res.status != 200) { throw new Error("wrong status: " + res.status) } + } `)) if !assert.NoError(b, err) { return } - r.SetOptions(lib.Options{ + err = r.SetOptions(lib.Options{ Throw: null.BoolFrom(true), MaxRedirects: null.IntFrom(10), Hosts: tb.Dialer.Hosts, NoCookiesReset: null.BoolFrom(true), }) + require.NoError(b, err) var ch = make(chan stats.SampleContainer, 100) + defer close(ch) go func() { // read the channel so it doesn't block - for { - <-ch + for range ch { } }() - vu, err := r.NewVU(ch) + initVU, err := r.NewVU(1, ch) if !assert.NoError(b, err) { return } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) b.StartTimer() for i := 0; i < b.N; i++ { - err = vu.RunOnce(context.Background()) + err = vu.RunOnce() assert.NoError(b, err) } } diff --git a/js/initcontext.go b/js/initcontext.go index f78620e099c..a5a2823f5ef 100644 --- a/js/initcontext.go +++ b/js/initcontext.go @@ -28,12 +28,14 @@ import ( "strings" "github.com/dop251/goja" + "github.com/pkg/errors" + "github.com/spf13/afero" + "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/js/compiler" "github.com/loadimpact/k6/js/modules" + "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/loader" - "github.com/pkg/errors" - "github.com/spf13/afero" ) type programWithSource struct { @@ -42,6 +44,9 @@ type programWithSource struct { module *goja.Object } +const openCantBeUsedOutsideInitContextMsg = `The "open()" function is only available in the init stage ` + + `(i.e. the global scope), see https://k6.io/docs/using-k6/test-life-cycle for more information` + // InitContext provides APIs for use in the init context. type InitContext struct { // Bound runtime; used to instantiate objects. @@ -58,12 +63,12 @@ type InitContext struct { // Cache of loaded programs and files. 
programs map[string]programWithSource - compatibilityMode compiler.CompatibilityMode + compatibilityMode lib.CompatibilityMode } // NewInitContext creates a new initcontext with the provided arguments func NewInitContext( - rt *goja.Runtime, c *compiler.Compiler, compatMode compiler.CompatibilityMode, + rt *goja.Runtime, c *compiler.Compiler, compatMode lib.CompatibilityMode, ctxPtr *context.Context, filesystems map[string]afero.Fs, pwd *url.URL, ) *InitContext { return &InitContext{ @@ -81,7 +86,7 @@ func newBoundInitContext(base *InitContext, ctxPtr *context.Context, rt *goja.Ru // we don't copy the exports as otherwise they will be shared and we don't want this. // this means that all the files will be executed again but once again only once per compilation // of the main file. - var programs = make(map[string]programWithSource, len(base.programs)) + programs := make(map[string]programWithSource, len(base.programs)) for key, program := range base.programs { programs[key] = programWithSource{ src: program.src, @@ -188,7 +193,11 @@ func (i *InitContext) compileImport(src, filename string) (*goja.Program, error) } // Open implements open() in the init context and will read and return the contents of a file -func (i *InitContext) Open(filename string, args ...string) (goja.Value, error) { +func (i *InitContext) Open(ctx context.Context, filename string, args ...string) (goja.Value, error) { + if lib.GetState(ctx) != nil { + return nil, errors.New(openCantBeUsedOutsideInitContextMsg) + } + if filename == "" { return nil, errors.New("open() can't be used with an empty filename") } diff --git a/js/initcontext_test.go b/js/initcontext_test.go index c58b3cfbdae..5a331b4d07f 100644 --- a/js/initcontext_test.go +++ b/js/initcontext_test.go @@ -229,7 +229,7 @@ func TestInitContextRequire(t *testing.T) { if !assert.NoError(t, err) { return } - _, err = bi.Default(goja.Undefined()) + _, err = bi.exports["default"](goja.Undefined()) assert.NoError(t, err) }) }) @@ -401,7 
+401,7 @@ func TestRequestWithBinaryFile(t *testing.T) { ctx = common.WithRuntime(ctx, bi.Runtime) *bi.Context = ctx - v, err := bi.Default(goja.Undefined()) + v, err := bi.exports["default"](goja.Undefined()) assert.NoError(t, err) assert.NotNil(t, v) assert.Equal(t, true, v.Export()) diff --git a/js/lib/lib.go b/js/lib/lib.go index f3c6adad30f..31cff76071f 100644 --- a/js/lib/lib.go +++ b/js/lib/lib.go @@ -25,7 +25,7 @@ package lib import ( "sync" - "github.com/GeertJohan/go.rice" + rice "github.com/GeertJohan/go.rice" "github.com/dop251/goja" ) diff --git a/js/module_loading_test.go b/js/module_loading_test.go index 1ac8121b350..a5a3fa7289e 100644 --- a/js/module_loading_test.go +++ b/js/module_loading_test.go @@ -26,11 +26,13 @@ import ( "os" "testing" + "github.com/spf13/afero" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/testutils/httpmultibin" "github.com/loadimpact/k6/stats" - "github.com/spf13/afero" - "github.com/stretchr/testify/require" ) func newDevNullSampleChannel() chan stats.SampleContainer { @@ -86,7 +88,7 @@ func TestLoadOnceGlobalVars(t *testing.T) { return c.C(); } `), os.ModePerm)) - r1, err := getSimpleRunnerWithFileFs("/script.js", ` + r1, err := getSimpleRunner("/script.js", ` import { A } from "./A.js"; import { B } from "./B.js"; @@ -98,7 +100,7 @@ func TestLoadOnceGlobalVars(t *testing.T) { throw new Error("A() != B() (" + A() + ") != (" + B() + ")"); } } - `, fs) + `, fs, lib.RuntimeOptions{CompatibilityMode: null.StringFrom("extended")}) require.NoError(t, err) arc := r1.MakeArchive() @@ -111,9 +113,13 @@ func TestLoadOnceGlobalVars(t *testing.T) { t.Run(name, func(t *testing.T) { ch := newDevNullSampleChannel() defer close(ch) - vu, err := r.NewVU(ch) + initVU, err := r.NewVU(1, ch) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) require.NoError(t, err) - 
err = vu.RunOnce(context.Background()) + err = vu.RunOnce() require.NoError(t, err) }) } @@ -131,7 +137,7 @@ func TestLoadExportsIsUsableInModule(t *testing.T) { return exports.A() + "B"; } `), os.ModePerm)) - r1, err := getSimpleRunnerWithFileFs("/script.js", ` + r1, err := getSimpleRunner("/script.js", ` import { A, B } from "./A.js"; export default function(data) { @@ -143,7 +149,7 @@ func TestLoadExportsIsUsableInModule(t *testing.T) { throw new Error("wrong value of B() " + B()); } } - `, fs) + `, fs, lib.RuntimeOptions{CompatibilityMode: null.StringFrom("extended")}) require.NoError(t, err) arc := r1.MakeArchive() @@ -156,9 +162,12 @@ func TestLoadExportsIsUsableInModule(t *testing.T) { t.Run(name, func(t *testing.T) { ch := newDevNullSampleChannel() defer close(ch) - vu, err := r.NewVU(ch) + initVU, err := r.NewVU(1, ch) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) require.NoError(t, err) - err = vu.RunOnce(context.Background()) + err = vu.RunOnce() require.NoError(t, err) }) } @@ -177,7 +186,7 @@ func TestLoadDoesntBreakHTTPGet(t *testing.T) { return http.get("HTTPBIN_URL/get"); } `)), os.ModePerm)) - r1, err := getSimpleRunnerWithFileFs("/script.js", ` + r1, err := getSimpleRunner("/script.js", ` import { A } from "./A.js"; export default function(data) { @@ -186,7 +195,7 @@ func TestLoadDoesntBreakHTTPGet(t *testing.T) { throw new Error("wrong status "+ resp.status); } } - `, fs) + `, fs, lib.RuntimeOptions{CompatibilityMode: null.StringFrom("extended")}) require.NoError(t, err) require.NoError(t, r1.SetOptions(lib.Options{Hosts: tb.Dialer.Hosts})) @@ -200,9 +209,12 @@ func TestLoadDoesntBreakHTTPGet(t *testing.T) { t.Run(name, func(t *testing.T) { ch := newDevNullSampleChannel() defer close(ch) - vu, err := r.NewVU(ch) + initVU, err := r.NewVU(1, ch) require.NoError(t, err) - err = vu.RunOnce(context.Background()) + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() require.NoError(t, err) }) } @@ -217,7 +229,7 @@ func TestLoadGlobalVarsAreNotSharedBetweenVUs(t *testing.T) { return globalVar; } `), os.ModePerm)) - r1, err := getSimpleRunnerWithFileFs("/script.js", ` + r1, err := getSimpleRunner("/script.js", ` import { A } from "./A.js"; export default function(data) { @@ -228,7 +240,7 @@ func TestLoadGlobalVarsAreNotSharedBetweenVUs(t *testing.T) { throw new Error("wrong value of a " + a); } } - `, fs) + `, fs, lib.RuntimeOptions{CompatibilityMode: null.StringFrom("extended")}) require.NoError(t, err) arc := r1.MakeArchive() @@ -241,15 +253,21 @@ func TestLoadGlobalVarsAreNotSharedBetweenVUs(t *testing.T) { t.Run(name, func(t *testing.T) { ch := newDevNullSampleChannel() defer close(ch) - vu, err := r.NewVU(ch) + initVU, err := r.NewVU(1, ch) require.NoError(t, err) - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() require.NoError(t, err) // run a second VU - vu, err = r.NewVU(ch) + initVU, err = r.NewVU(2, ch) require.NoError(t, err) - err = vu.RunOnce(context.Background()) + ctx, cancel = context.WithCancel(context.Background()) + defer cancel() + vu = initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() require.NoError(t, err) }) } @@ -285,7 +303,7 @@ func TestLoadCycle(t *testing.T) { `), os.ModePerm)) data, err := afero.ReadFile(fs, "/main.js") require.NoError(t, err) - r1, err := getSimpleRunnerWithFileFs("/main.js", string(data), fs) + r1, err := getSimpleRunner("/main.js", string(data), fs, lib.RuntimeOptions{CompatibilityMode: null.StringFrom("extended")}) require.NoError(t, err) arc := r1.MakeArchive() @@ -298,9 +316,12 @@ func TestLoadCycle(t *testing.T) { t.Run(name, func(t *testing.T) { ch := 
newDevNullSampleChannel() defer close(ch) - vu, err := r.NewVU(ch) + initVU, err := r.NewVU(1, ch) require.NoError(t, err) - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() require.NoError(t, err) }) } @@ -331,7 +352,7 @@ func TestLoadCycleBinding(t *testing.T) { } `), os.ModePerm)) - r1, err := getSimpleRunnerWithFileFs("/main.js", ` + r1, err := getSimpleRunner("/main.js", ` import {foo} from './a.js'; import {bar} from './b.js'; export default function() { @@ -344,7 +365,7 @@ func TestLoadCycleBinding(t *testing.T) { throw new Error("Wrong value of bar() "+ barMessage); } } - `, fs) + `, fs, lib.RuntimeOptions{CompatibilityMode: null.StringFrom("extended")}) require.NoError(t, err) arc := r1.MakeArchive() @@ -357,9 +378,12 @@ func TestLoadCycleBinding(t *testing.T) { t.Run(name, func(t *testing.T) { ch := newDevNullSampleChannel() defer close(ch) - vu, err := r.NewVU(ch) + initVU, err := r.NewVU(1, ch) require.NoError(t, err) - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() require.NoError(t, err) }) } @@ -387,7 +411,7 @@ func TestBrowserified(t *testing.T) { }); `), os.ModePerm)) - r1, err := getSimpleRunnerWithFileFs("/script.js", ` + r1, err := getSimpleRunner("/script.js", ` import {alpha, bravo } from "./browserified.js"; export default function(data) { @@ -405,7 +429,7 @@ func TestBrowserified(t *testing.T) { throw new Error("bravo.B() != 'b' (" + bravo.B() + ") != 'b'"); } } - `, fs) + `, fs, lib.RuntimeOptions{CompatibilityMode: null.StringFrom("extended")}) require.NoError(t, err) arc := r1.MakeArchive() @@ -418,9 +442,12 @@ func TestBrowserified(t *testing.T) { t.Run(name, func(t *testing.T) { ch := make(chan stats.SampleContainer, 100) defer 
close(ch) - vu, err := r.NewVU(ch) + initVU, err := r.NewVU(1, ch) require.NoError(t, err) - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() require.NoError(t, err) }) } @@ -433,13 +460,13 @@ func TestLoadingUnexistingModuleDoesntPanic(t *testing.T) { } catch (err) { b = "correct"; } - export default function() { + exports.default = function() { if (b != "correct") { throw new Error("wrong b "+ JSON.stringify(b)); } }` require.NoError(t, afero.WriteFile(fs, "/script.js", []byte(data), 0644)) - r1, err := getSimpleRunnerWithFileFs("/script.js", data, fs) + r1, err := getSimpleRunner("/script.js", data, fs) require.NoError(t, err) arc := r1.MakeArchive() @@ -456,9 +483,12 @@ func TestLoadingUnexistingModuleDoesntPanic(t *testing.T) { t.Run(name, func(t *testing.T) { ch := newDevNullSampleChannel() defer close(ch) - vu, err := r.NewVU(ch) + initVU, err := r.NewVU(1, ch) require.NoError(t, err) - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() require.NoError(t, err) }) } diff --git a/js/modules/k6/crypto/crypto_test.go b/js/modules/k6/crypto/crypto_test.go index c6098aeef1f..4117df54d50 100644 --- a/js/modules/k6/crypto/crypto_test.go +++ b/js/modules/k6/crypto/crypto_test.go @@ -27,9 +27,10 @@ import ( "testing" "github.com/dop251/goja" + "github.com/stretchr/testify/assert" + "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/lib" - "github.com/stretchr/testify/assert" ) type MockReader struct{} @@ -51,7 +52,7 @@ func TestCryptoAlgorithms(t *testing.T) { t.Run("RandomBytesSuccess", func(t *testing.T) { _, err := common.RunString(rt, ` - let bytes = crypto.randomBytes(5); + var bytes = crypto.randomBytes(5); if (bytes.length !== 5) { throw new 
Error("Incorrect size: " + bytes.length); }`) @@ -78,8 +79,8 @@ func TestCryptoAlgorithms(t *testing.T) { t.Run("MD4", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "aa010fbc1d14c795d86ef98c95479d17"; - let hash = crypto.md4("hello world", "hex"); + var correct = "aa010fbc1d14c795d86ef98c95479d17"; + var hash = crypto.md4("hello world", "hex"); if (hash !== correct) { throw new Error("Hash mismatch: " + hash); }`) @@ -88,8 +89,8 @@ func TestCryptoAlgorithms(t *testing.T) { t.Run("MD5", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "5eb63bbbe01eeed093cb22bb8f5acdc3"; - let hash = crypto.md5("hello world", "hex"); + var correct = "5eb63bbbe01eeed093cb22bb8f5acdc3"; + var hash = crypto.md5("hello world", "hex"); if (hash !== correct) { throw new Error("Hash mismatch: " + hash); }`) @@ -99,8 +100,8 @@ func TestCryptoAlgorithms(t *testing.T) { t.Run("SHA1", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed"; - let hash = crypto.sha1("hello world", "hex"); + var correct = "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed"; + var hash = crypto.sha1("hello world", "hex"); if (hash !== correct) { throw new Error("Hash mismatch: " + hash); }`) @@ -110,8 +111,8 @@ func TestCryptoAlgorithms(t *testing.T) { t.Run("SHA256", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"; - let hash = crypto.sha256("hello world", "hex"); + var correct = "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"; + var hash = crypto.sha256("hello world", "hex"); if (hash !== correct) { throw new Error("Hash mismatch: " + hash); }`) @@ -121,8 +122,8 @@ func TestCryptoAlgorithms(t *testing.T) { t.Run("SHA384", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "fdbd8e75a67f29f701a4e040385e2e23986303ea10239211af907fcbb83578b3e417cb71ce646efd0819dd8c088de1bd"; - let hash = 
crypto.sha384("hello world", "hex"); + var correct = "fdbd8e75a67f29f701a4e040385e2e23986303ea10239211af907fcbb83578b3e417cb71ce646efd0819dd8c088de1bd"; + var hash = crypto.sha384("hello world", "hex"); if (hash !== correct) { throw new Error("Hash mismatch: " + hash); }`) @@ -132,8 +133,8 @@ func TestCryptoAlgorithms(t *testing.T) { t.Run("SHA512", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f"; - let hash = crypto.sha512("hello world", "hex"); + var correct = "309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f"; + var hash = crypto.sha512("hello world", "hex"); if (hash !== correct) { throw new Error("Hash mismatch: " + hash); }`) @@ -143,8 +144,8 @@ func TestCryptoAlgorithms(t *testing.T) { t.Run("SHA512_224", func(t *testing.T) { _, err := common.RunString(rt, ` - let hash = crypto.sha512_224("hello world", "hex"); - const correct = "22e0d52336f64a998085078b05a6e37b26f8120f43bf4db4c43a64ee"; + var hash = crypto.sha512_224("hello world", "hex"); + var correct = "22e0d52336f64a998085078b05a6e37b26f8120f43bf4db4c43a64ee"; if (hash !== correct) { throw new Error("Hash mismatch: " + hash); }`) @@ -154,8 +155,8 @@ func TestCryptoAlgorithms(t *testing.T) { t.Run("SHA512_256", func(t *testing.T) { _, err := common.RunString(rt, ` - let hash = crypto.sha512_256("hello world", "hex"); - const correct = "0ac561fac838104e3f2e4ad107b4bee3e938bf15f2b15f009ccccd61a913f017"; + var hash = crypto.sha512_256("hello world", "hex"); + var correct = "0ac561fac838104e3f2e4ad107b4bee3e938bf15f2b15f009ccccd61a913f017"; if (hash !== correct) { throw new Error("Hash mismatch: " + hash); }`) @@ -165,8 +166,8 @@ func TestCryptoAlgorithms(t *testing.T) { t.Run("RIPEMD160", func(t *testing.T) { _, err := common.RunString(rt, ` - let hash = 
crypto.ripemd160("hello world", "hex"); - const correct = "98c615784ccb5fe5936fbc0cbe9dfdb408d92f0f"; + var hash = crypto.ripemd160("hello world", "hex"); + var correct = "98c615784ccb5fe5936fbc0cbe9dfdb408d92f0f"; if (hash !== correct) { throw new Error("Hash mismatch: " + hash); }`) @@ -195,11 +196,11 @@ func TestStreamingApi(t *testing.T) { // Empty strings are still hashable t.Run("Empty", func(t *testing.T) { _, err := common.RunString(rt, ` - const correctHex = "d41d8cd98f00b204e9800998ecf8427e"; + var correctHex = "d41d8cd98f00b204e9800998ecf8427e"; - let hasher = crypto.createHash("md5"); + var hasher = crypto.createHash("md5"); - const resultHex = hasher.digest("hex"); + var resultHex = hasher.digest("hex"); if (resultHex !== correctHex) { throw new Error("Hex encoding mismatch: " + resultHex); }`) @@ -209,12 +210,12 @@ func TestStreamingApi(t *testing.T) { t.Run("UpdateOnce", func(t *testing.T) { _, err := common.RunString(rt, ` - const correctHex = "5eb63bbbe01eeed093cb22bb8f5acdc3"; + var correctHex = "5eb63bbbe01eeed093cb22bb8f5acdc3"; - let hasher = crypto.createHash("md5"); + var hasher = crypto.createHash("md5"); hasher.update("hello world"); - const resultHex = hasher.digest("hex"); + var resultHex = hasher.digest("hex"); if (resultHex !== correctHex) { throw new Error("Hex encoding mismatch: " + resultHex); }`) @@ -224,14 +225,14 @@ func TestStreamingApi(t *testing.T) { t.Run("UpdateMultiple", func(t *testing.T) { _, err := common.RunString(rt, ` - const correctHex = "5eb63bbbe01eeed093cb22bb8f5acdc3"; + var correctHex = "5eb63bbbe01eeed093cb22bb8f5acdc3"; - let hasher = crypto.createHash("md5"); + var hasher = crypto.createHash("md5"); hasher.update("hello"); hasher.update(" "); hasher.update("world"); - const resultHex = hasher.digest("hex"); + var resultHex = hasher.digest("hex"); if (resultHex !== correctHex) { throw new Error("Hex encoding mismatch: " + resultHex); }`) @@ -259,31 +260,31 @@ func TestOutputEncoding(t *testing.T) { 
t.Run("Valid", func(t *testing.T) { _, err := common.RunString(rt, ` - const correctHex = "5eb63bbbe01eeed093cb22bb8f5acdc3"; - const correctBase64 = "XrY7u+Ae7tCTyyK7j1rNww=="; - const correctBase64URL = "XrY7u-Ae7tCTyyK7j1rNww==" - const correctBase64RawURL = "XrY7u-Ae7tCTyyK7j1rNww"; - const correctBinary = [94,182,59,187,224,30,238,208,147,203,34,187,143,90,205,195]; + var correctHex = "5eb63bbbe01eeed093cb22bb8f5acdc3"; + var correctBase64 = "XrY7u+Ae7tCTyyK7j1rNww=="; + var correctBase64URL = "XrY7u-Ae7tCTyyK7j1rNww==" + var correctBase64RawURL = "XrY7u-Ae7tCTyyK7j1rNww"; + var correctBinary = [94,182,59,187,224,30,238,208,147,203,34,187,143,90,205,195]; - let hasher = crypto.createHash("md5"); + var hasher = crypto.createHash("md5"); hasher.update("hello world"); - const resultHex = hasher.digest("hex"); + var resultHex = hasher.digest("hex"); if (resultHex !== correctHex) { throw new Error("Hex encoding mismatch: " + resultHex); } - const resultBase64 = hasher.digest("base64"); + var resultBase64 = hasher.digest("base64"); if (resultBase64 !== correctBase64) { throw new Error("Base64 encoding mismatch: " + resultBase64); } - const resultBase64URL = hasher.digest("base64url"); + var resultBase64URL = hasher.digest("base64url"); if (resultBase64URL !== correctBase64URL) { throw new Error("Base64 URL encoding mismatch: " + resultBase64URL); } - const resultBase64RawURL = hasher.digest("base64rawurl"); + var resultBase64RawURL = hasher.digest("base64rawurl"); if (resultBase64RawURL !== correctBase64RawURL) { throw new Error("Base64 raw URL encoding mismatch: " + resultBase64RawURL); } @@ -300,7 +301,7 @@ func TestOutputEncoding(t *testing.T) { return true; } - const resultBinary = hasher.digest("binary"); + var resultBinary = hasher.digest("binary"); if (!arraysEqual(resultBinary, correctBinary)) { throw new Error("Binary encoding mismatch: " + JSON.stringify(resultBinary)); } @@ -311,7 +312,7 @@ func TestOutputEncoding(t *testing.T) { t.Run("Invalid", func(t 
*testing.T) { _, err := common.RunString(rt, ` - let hasher = crypto.createHash("md5"); + var hasher = crypto.createHash("md5"); hasher.update("hello world"); hasher.digest("someInvalidEncoding"); `) @@ -352,10 +353,10 @@ func TestHMac(t *testing.T) { rt.Set("algorithm", rt.ToValue(algorithm)) t.Run(algorithm+" hasher: valid", func(t *testing.T) { _, err := common.RunString(rt, ` - let hasher = crypto.createHMAC(algorithm, "a secret"); + var hasher = crypto.createHMAC(algorithm, "a secret"); hasher.update("some data to hash"); - const resultHex = hasher.digest("hex"); + var resultHex = hasher.digest("hex"); if (resultHex !== correctHex) { throw new Error("Hex encoding mismatch: " + resultHex); }`) @@ -365,7 +366,7 @@ func TestHMac(t *testing.T) { t.Run(algorithm+" wrapper: valid", func(t *testing.T) { _, err := common.RunString(rt, ` - let resultHex = crypto.hmac(algorithm, "a secret", "some data to hash", "hex"); + var resultHex = crypto.hmac(algorithm, "a secret", "some data to hash", "hex"); if (resultHex !== correctHex) { throw new Error("Hex encoding mismatch: " + resultHex); }`) @@ -385,10 +386,10 @@ func TestHMac(t *testing.T) { rt.Set("algorithm", rt.ToValue(algorithm)) t.Run(algorithm+" hasher: invalid", func(t *testing.T) { _, err := common.RunString(rt, ` - let hasher = crypto.createHMAC(algorithm, "a secret"); + var hasher = crypto.createHMAC(algorithm, "a secret"); hasher.update("some data to hash"); - const resultHex = hasher.digest("hex"); + var resultHex = hasher.digest("hex"); if (resultHex !== correctHex) { throw new Error("Hex encoding mismatch: " + resultHex); }`) @@ -398,7 +399,7 @@ func TestHMac(t *testing.T) { t.Run(algorithm+" wrapper: invalid", func(t *testing.T) { _, err := common.RunString(rt, ` - let resultHex = crypto.hmac(algorithm, "a secret", "some data to hash", "hex"); + var resultHex = crypto.hmac(algorithm, "a secret", "some data to hash", "hex"); if (resultHex !== correctHex) { throw new Error("Hex encoding mismatch: " + 
resultHex); }`) @@ -417,40 +418,40 @@ func TestAWSv4(t *testing.T) { rt.Set("crypto", common.Bind(rt, New(), &ctx)) _, err := common.RunString(rt, ` - let HexEncode = crypto.hexEncode; - let HmacSHA256 = function(data, key) { + var HexEncode = crypto.hexEncode; + var HmacSHA256 = function(data, key) { return crypto.hmac("sha256",key, data, "binary"); }; - let expectedKDate = '969fbb94feb542b71ede6f87fe4d5fa29c789342b0f407474670f0c2489e0a0d' - let expectedKRegion = '69daa0209cd9c5ff5c8ced464a696fd4252e981430b10e3d3fd8e2f197d7a70c' - let expectedKService = 'f72cfd46f26bc4643f06a11eabb6c0ba18780c19a8da0c31ace671265e3c87fa' - let expectedKSigning = 'f4780e2d9f65fa895f9c67b32ce1baf0b0d8a43505a000a1a9e090d414db404d' + var expectedKDate = '969fbb94feb542b71ede6f87fe4d5fa29c789342b0f407474670f0c2489e0a0d' + var expectedKRegion = '69daa0209cd9c5ff5c8ced464a696fd4252e981430b10e3d3fd8e2f197d7a70c' + var expectedKService = 'f72cfd46f26bc4643f06a11eabb6c0ba18780c19a8da0c31ace671265e3c87fa' + var expectedKSigning = 'f4780e2d9f65fa895f9c67b32ce1baf0b0d8a43505a000a1a9e090d414db404d' - let key = 'wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY'; - let dateStamp = '20120215'; - let regionName = 'us-east-1'; - let serviceName = 'iam'; + var key = 'wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY'; + var dateStamp = '20120215'; + var regionName = 'us-east-1'; + var serviceName = 'iam'; - let kDate = HmacSHA256(dateStamp, "AWS4" + key); - let kRegion = HmacSHA256(regionName, kDate); - let kService = HmacSHA256(serviceName, kRegion); - let kSigning = HmacSHA256("aws4_request", kService); + var kDate = HmacSHA256(dateStamp, "AWS4" + key); + var kRegion = HmacSHA256(regionName, kDate); + var kService = HmacSHA256(serviceName, kRegion); + var kSigning = HmacSHA256("aws4_request", kService); - let hexKDate = HexEncode(kDate); + var hexKDate = HexEncode(kDate); if (expectedKDate != hexKDate) { throw new Error("Wrong kDate: expected '" + expectedKDate + "' got '" + hexKDate + "'"); } - let hexKRegion = 
HexEncode(kRegion); + var hexKRegion = HexEncode(kRegion); if (expectedKRegion != hexKRegion) { throw new Error("Wrong kRegion: expected '" + expectedKRegion + "' got '" + hexKRegion + "'"); } - let hexKService = HexEncode(kService); + var hexKService = HexEncode(kService); if (expectedKService != hexKService) { throw new Error("Wrong kService: expected '" + expectedKService + "' got '" + hexKService + "'"); } - let hexKSigning = HexEncode(kSigning); + var hexKSigning = HexEncode(kSigning); if (expectedKSigning != hexKSigning) { throw new Error("Wrong kSigning: expected '" + expectedKSigning + "' got '" + hexKSigning + "'"); } diff --git a/js/modules/k6/crypto/x509/x509.go b/js/modules/k6/crypto/x509/x509.go index 0fff2c4c95e..28480bd6aa5 100644 --- a/js/modules/k6/crypto/x509/x509.go +++ b/js/modules/k6/crypto/x509/x509.go @@ -32,8 +32,9 @@ import ( "fmt" "time" - "github.com/loadimpact/k6/js/common" "github.com/pkg/errors" + + "github.com/loadimpact/k6/js/common" ) // X509 certificate functionality diff --git a/js/modules/k6/crypto/x509/x509_test.go b/js/modules/k6/crypto/x509/x509_test.go index d35912f4bf4..7f4cce804a6 100644 --- a/js/modules/k6/crypto/x509/x509_test.go +++ b/js/modules/k6/crypto/x509/x509_test.go @@ -24,12 +24,12 @@ import ( "context" gox509 "crypto/x509" "fmt" - "strings" "testing" "github.com/dop251/goja" - "github.com/loadimpact/k6/js/common" "github.com/stretchr/testify/assert" + + "github.com/loadimpact/k6/js/common" ) func makeRuntime() *goja.Runtime { @@ -49,7 +49,7 @@ type Material struct { } var material = Material{ //nolint:gochecknoglobals - dsaCertificate: template(`-----BEGIN CERTIFICATE----- + dsaCertificate: `-----BEGIN CERTIFICATE----- MIIFnzCCBUSgAwIBAgIJAPOE4rArGHVcMAsGCWCGSAFlAwQDAjCBsTELMAkGA1UE BhMCWloxGTAXBgNVBAgMEEtvcHVuY2V6aXMgS3JhaXMxETAPBgNVBAcMCEFzaHRp bm9rMRwwGgYDVQQKDBNFeHVtYnJhbiBDb252ZW50aW9uMRkwFwYDVQQLDBBFeHVt @@ -81,8 +81,8 @@ JqGGJU+MCQZEoTAfBgNVHSMEGDAWgBSSb364iDHRI6/2JqGGJU+MCQZEoTAPBgNV 
HRMBAf8EBTADAQH/MAsGCWCGSAFlAwQDAgNIADBFAiEA1nr63IX9aaGUPeOUC0Bh w3Y7mpv5+sVgtoIi8ljxVSICIFCpEl70YjRVIUKL8N/lJwKxisrJ4+Xxg/DIeGP8 L8GA ------END CERTIFICATE-----`), - ecdsaCertificate: template(`-----BEGIN CERTIFICATE----- +-----END CERTIFICATE-----`, + ecdsaCertificate: `-----BEGIN CERTIFICATE----- MIIDXjCCAwWgAwIBAgICBNIwCgYIKoZIzj0EAwIwgdsxCzAJBgNVBAYTAlpaMRkw FwYDVQQIExBLb3B1bmNlemlzIEtyYWlzMREwDwYDVQQHEwhBc2h0aW5vazEaMBgG A1UECRMRMjIxQiBCYWtlciBTdHJlZXQxDjAMBgNVBBETBTk5OTk5MRwwGgYDVQQK @@ -102,8 +102,8 @@ Ly9wcmVzcy5leGNvdW5jaWwuenqGJ2h0dHA6Ly9sZWFybmluZy5leGNvdW5jaWwu enovaW5kZXguaHRtbDAKBggqhkjOPQQDAgNHADBEAiA/X4Y+Zaw4ziqL4grkY+rm srWfS/JGxLvN49r68cczSwIgWEXFIHMwE+OhKC6z01mIPe2G2CguYHukWyL+BHtT +20= ------END CERTIFICATE-----`), - rsaCertificate: template(`-----BEGIN CERTIFICATE----- +-----END CERTIFICATE-----`, + rsaCertificate: `-----BEGIN CERTIFICATE----- MIIE6zCCA9OgAwIBAgICBNIwDQYJKoZIhvcNAQELBQAwgdsxCzAJBgNVBAYTAlpa MRkwFwYDVQQIExBLb3B1bmNlemlzIEtyYWlzMREwDwYDVQQHEwhBc2h0aW5vazEa MBgGA1UECRMRMjIxQiBCYWtlciBTdHJlZXQxDjAMBgNVBBETBTk5OTk5MRwwGgYD @@ -131,17 +131,13 @@ gzg3dNaCY65aH0cJE/dVwiS/F2XTr1zvr+uBPExgrA21+FSIlHM0Dot+VGKdCLEO 6HugOCDBdzKF2hsHeI5LvgXUX5zQ0gnsd93+QuxUmiN7QZZs8tDMD/+efo4OWvp/ xytSVXVn+cECQLg9hVn+Zx3XO2FA0eOzaWEONnUGghT/Ivw06lUxis5tkAoAU93d ddBqJe0XUeAX8Zr6EJ82 ------END CERTIFICATE-----`), - publicKey: template(`-----BEGIN PUBLIC KEY----- +-----END CERTIFICATE-----`, + publicKey: `-----BEGIN PUBLIC KEY----- MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDXMLr/Y/vUtIFY75jj0YXfp6lQ 7iEIbps3BvRE4isTpxs8fXLnLM8LAuJScxiKyrGnj8EMb7LIHkSMBlz6iVj9atY6 EUEm/VHUnElNquzGyBA50TCfpv6NHPaTvOoB45yQbZ/YB4LO+CsT9eIMDZ4tcU9Z +xD10ifJhhIwpZUFIQIDAQAB ------END PUBLIC KEY-----`), -} - -func template(value string) string { - return fmt.Sprintf("`%s`", value) +-----END PUBLIC KEY-----`, } func TestParse(t *testing.T) { @@ -159,21 +155,21 @@ func TestParse(t *testing.T) { t.Run("ParseFailure", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const 
pem = %s; + var pem = %q; x509.parse(pem);`, material.publicKey)) if assert.Error(t, err) { - assert.True(t, strings.HasPrefix( + assert.Contains(t, err.Error(), "GoError: failed to parse certificate", - )) + ) } }) t.Run("SignatureAlgorithm", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.signatureAlgorithm; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.signatureAlgorithm; if (value !== "SHA256-RSA") { throw new Error("Bad signature algorithm: " + value); }`, material.rsaCertificate)) @@ -182,8 +178,8 @@ func TestParse(t *testing.T) { t.Run("Subject", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); + var pem = %q; + var cert = x509.parse(pem); if (typeof cert.subject !== "object") { throw new Error("Bad subject: " + typeof cert.subject); }`, material.rsaCertificate)) @@ -192,9 +188,9 @@ func TestParse(t *testing.T) { t.Run("SubjectCommonName", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.subject ? cert.subject.commonName : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.subject ? cert.subject.commonName : null; if (value !== "excouncil.zz") { throw new Error("Bad subject common name: " + value); }`, material.rsaCertificate)) @@ -203,9 +199,9 @@ func TestParse(t *testing.T) { t.Run("SubjectCountry", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.subject ? cert.subject.country : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.subject ? 
cert.subject.country : null; if (value !== "ZZ") { throw new Error("Bad subject country: " + value); }`, material.rsaCertificate)) @@ -214,9 +210,9 @@ func TestParse(t *testing.T) { t.Run("SubjectPostalCode", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.subject ? cert.subject.postalCode : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.subject ? cert.subject.postalCode : null; if (value !== "99999") { throw new Error("Bad subject postal code: " + value); }`, material.rsaCertificate)) @@ -225,9 +221,9 @@ func TestParse(t *testing.T) { t.Run("SubjectProvince", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.subject ? cert.subject.stateOrProvinceName : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.subject ? cert.subject.stateOrProvinceName : null; if (value !== "Kopuncezis Krais") { throw new Error("Bad subject province: " + value); }`, material.rsaCertificate)) @@ -236,9 +232,9 @@ func TestParse(t *testing.T) { t.Run("SubjectLocality", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.subject ? cert.subject.localityName : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.subject ? cert.subject.localityName : null; if (value !== "Ashtinok") { throw new Error("Bad subject locality: " + value); }`, material.rsaCertificate)) @@ -247,9 +243,9 @@ func TestParse(t *testing.T) { t.Run("SubjectStreetAddress", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.subject ? cert.subject.streetAddress : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.subject ? 
cert.subject.streetAddress : null; if (value !== "221B Baker Street") { throw new Error("Bad subject street address: " + value); }`, material.rsaCertificate)) @@ -258,9 +254,9 @@ func TestParse(t *testing.T) { t.Run("SubjectOrganization", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.subject ? cert.subject.organizationName : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.subject ? cert.subject.organizationName : null; if (value !== "Exumbran Convention") { throw new Error("Bad subject organization: " + value); }`, material.rsaCertificate)) @@ -269,9 +265,9 @@ func TestParse(t *testing.T) { t.Run("SubjectOrganizationalUnit", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const values = + var pem = %q; + var cert = x509.parse(pem); + var values = cert.subject ? cert.subject.organizationalUnitName : null; if (!( values.length === 2 && @@ -287,11 +283,11 @@ func TestParse(t *testing.T) { t.Run("SubjectNames", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const values = cert.subject ? cert.subject.names : null; - const strings = values - ? values.map(entry => entry.type + ": " + entry.value) + var pem = %q; + var cert = x509.parse(pem); + var values = cert.subject ? cert.subject.names : null; + var strings = values + ? 
values.map(function(entry) { return entry.type + ": " + entry.value}) : null; Array.prototype.includes = function (value) { return this.indexOf(value) !== -1 } @@ -316,8 +312,8 @@ func TestParse(t *testing.T) { t.Run("Issuer", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); + var pem = %q; + var cert = x509.parse(pem); if (typeof cert.issuer !== "object") { throw new Error("Bad issuer: " + typeof cert.issuer); }`, material.rsaCertificate)) @@ -326,9 +322,9 @@ func TestParse(t *testing.T) { t.Run("IssuerCommonName", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.issuer ? cert.issuer.commonName : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.issuer ? cert.issuer.commonName : null; if (value !== "excouncil.zz") { throw new Error("Bad issuer common name: " + value); }`, material.rsaCertificate)) @@ -337,9 +333,9 @@ func TestParse(t *testing.T) { t.Run("IssuerCountry", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.issuer ? cert.issuer.country : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.issuer ? cert.issuer.country : null; if (value !== "ZZ") { throw new Error("Bad issuer country: " + value); }`, material.rsaCertificate)) @@ -348,9 +344,9 @@ func TestParse(t *testing.T) { t.Run("IssuerProvince", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.issuer ? cert.issuer.stateOrProvinceName : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.issuer ? 
cert.issuer.stateOrProvinceName : null; if (value !== "Kopuncezis Krais") { throw new Error("Bad issuer province: " + value); }`, material.rsaCertificate)) @@ -359,9 +355,9 @@ func TestParse(t *testing.T) { t.Run("IssuerLocality", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.issuer ? cert.issuer.localityName : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.issuer ? cert.issuer.localityName : null; if (value !== "Ashtinok") { throw new Error("Bad issuer locality: " + value); }`, material.rsaCertificate)) @@ -370,9 +366,9 @@ func TestParse(t *testing.T) { t.Run("IssuerOrganization", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.issuer ? cert.issuer.organizationName : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.issuer ? cert.issuer.organizationName : null; if (value !== "Exumbran Convention") { throw new Error("Bad issuer organization: " + value); }`, material.rsaCertificate)) @@ -381,11 +377,11 @@ func TestParse(t *testing.T) { t.Run("IssuerNames", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const values = cert.issuer ? cert.issuer.names : null; - const strings = values - ? values.map(entry => entry.type + ": " + entry.value) + var pem = %q; + var cert = x509.parse(pem); + var values = cert.issuer ? cert.issuer.names : null; + var strings = values + ? 
values.map(function(entry) { return entry.type + ": " + entry.value}) : null; Array.prototype.includes = function (value) { return this.indexOf(value) !== -1 } @@ -410,9 +406,9 @@ func TestParse(t *testing.T) { t.Run("NotBefore", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.notBefore; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.notBefore; if (value !== "2019-01-01T00:00:00Z") { throw new Error("Bad lower bound: " + value) }`, material.rsaCertificate)) @@ -421,9 +417,9 @@ func TestParse(t *testing.T) { t.Run("NotAfter", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.notAfter; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.notAfter; if (value !== "2020-01-01T00:00:00Z") { throw new Error("Bad upper bound: " + value); }`, material.rsaCertificate)) @@ -432,9 +428,9 @@ func TestParse(t *testing.T) { t.Run("AltNames", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const values = cert.altNames; + var pem = %q; + var cert = x509.parse(pem); + var values = cert.altNames; if (!( values.length === 8 && values[0] === "council.exumbran.zz" && @@ -453,10 +449,10 @@ func TestParse(t *testing.T) { t.Run("FingerPrint", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.fingerPrint; - const expected = [ + var pem = %q; + var cert = x509.parse(pem); + var value = cert.fingerPrint; + var expected = [ 85, 119, 3, 199, 150, 144, 202, 145, 178, 46, 205, 132, 37, 235, 251, 208, 139, 161, 143, 14 ] @@ -468,8 +464,8 @@ func TestParse(t *testing.T) { t.Run("PublicKey", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); + var pem = %q; + var cert = 
x509.parse(pem); if (typeof cert.publicKey !== "object") { throw new Error("Bad public key: " + typeof cert.publicKey); }`, material.rsaCertificate)) @@ -478,9 +474,9 @@ func TestParse(t *testing.T) { t.Run("RSAPublicKey", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.publicKey; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.publicKey; if (!( value && typeof value === "object" && @@ -496,9 +492,9 @@ func TestParse(t *testing.T) { t.Run("RSAPublicKeyExponent", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.publicKey ? cert.publicKey.key.e : null; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.publicKey ? cert.publicKey.key.e : null; if (value !== 65537) { throw new Error("Bad RSA public key exponent: " + value); }`, material.rsaCertificate)) @@ -507,10 +503,10 @@ func TestParse(t *testing.T) { t.Run("RSAPublicKeyModulus", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.publicKey ? cert.publicKey.key.n.bytes() : null; - const expected = [ + var pem = %q; + var cert = x509.parse(pem); + var value = cert.publicKey ? 
cert.publicKey.key.n.bytes() : null; + var expected = [ 223, 249, 234, 71, 180, 36, 28, 62, 84, 141, 177, 118, 53, 2, 175, 45, 167, 89, 155, 216, 103, 86, 32, 216, 42, 92, 84, 125, 183, 102, 217, 40, 255, 129, 38, 203, 175, 98, 209, 147, 151, 106, 250, 12, @@ -538,9 +534,9 @@ func TestParse(t *testing.T) { t.Run("DSAPublicKey", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.publicKey; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.publicKey; if (!( value && typeof value === "object" && @@ -555,9 +551,9 @@ func TestParse(t *testing.T) { t.Run("ECDSAPublicKey", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const cert = x509.parse(pem); - const value = cert.publicKey; + var pem = %q; + var cert = x509.parse(pem); + var value = cert.publicKey; if (!( value && typeof value === "object" && @@ -586,8 +582,8 @@ func TestGetAltNames(t *testing.T) { t.Run("Success", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const altNames = x509.getAltNames(pem); + var pem = %q; + var altNames = x509.getAltNames(pem); if (!( Array.isArray(altNames) && altNames.length === 8 && @@ -620,8 +616,8 @@ func TestGetIssuer(t *testing.T) { t.Run("Success", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const issuer = x509.getIssuer(pem); + var pem = %q; + var issuer = x509.getIssuer(pem); if (!( typeof issuer === "object" && issuer.commonName === "excouncil.zz" && @@ -652,8 +648,8 @@ func TestGetSubject(t *testing.T) { t.Run("Success", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - const pem = %s; - const subject = x509.getSubject(pem); + var pem = %q; + var subject = x509.getSubject(pem); if (!( typeof subject === "object" && subject.commonName === "excouncil.zz" && diff --git a/js/modules/k6/encoding/encoding_test.go 
b/js/modules/k6/encoding/encoding_test.go index ac08edcdbf4..1ae395916ac 100644 --- a/js/modules/k6/encoding/encoding_test.go +++ b/js/modules/k6/encoding/encoding_test.go @@ -25,8 +25,9 @@ import ( "testing" "github.com/dop251/goja" - "github.com/loadimpact/k6/js/common" "github.com/stretchr/testify/assert" + + "github.com/loadimpact/k6/js/common" ) func TestEncodingAlgorithms(t *testing.T) { @@ -43,8 +44,8 @@ func TestEncodingAlgorithms(t *testing.T) { t.Run("Base64", func(t *testing.T) { t.Run("DefaultEnc", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "aGVsbG8gd29ybGQ="; - let encoded = encoding.b64encode("hello world"); + var correct = "aGVsbG8gd29ybGQ="; + var encoded = encoding.b64encode("hello world"); if (encoded !== correct) { throw new Error("Encoding mismatch: " + encoded); }`) @@ -52,8 +53,8 @@ func TestEncodingAlgorithms(t *testing.T) { }) t.Run("DefaultDec", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "hello world"; - let decoded = encoding.b64decode("aGVsbG8gd29ybGQ="); + var correct = "hello world"; + var decoded = encoding.b64decode("aGVsbG8gd29ybGQ="); if (decoded !== correct) { throw new Error("Decoding mismatch: " + decoded); }`) @@ -61,8 +62,8 @@ func TestEncodingAlgorithms(t *testing.T) { }) t.Run("DefaultUnicodeEnc", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "44GT44KT44Gr44Gh44Gv5LiW55WM"; - let encoded = encoding.b64encode("こんにちは世界", "std"); + var correct = "44GT44KT44Gr44Gh44Gv5LiW55WM"; + var encoded = encoding.b64encode("こんにちは世界", "std"); if (encoded !== correct) { throw new Error("Encoding mismatch: " + encoded); }`) @@ -70,8 +71,8 @@ func TestEncodingAlgorithms(t *testing.T) { }) t.Run("DefaultUnicodeDec", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "こんにちは世界"; - let decoded = encoding.b64decode("44GT44KT44Gr44Gh44Gv5LiW55WM"); + var correct = "こんにちは世界"; + var decoded = encoding.b64decode("44GT44KT44Gr44Gh44Gv5LiW55WM"); if 
(decoded !== correct) { throw new Error("Decoding mismatch: " + decoded); }`) @@ -79,8 +80,8 @@ func TestEncodingAlgorithms(t *testing.T) { }) t.Run("StdEnc", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "aGVsbG8gd29ybGQ="; - let encoded = encoding.b64encode("hello world", "std"); + var correct = "aGVsbG8gd29ybGQ="; + var encoded = encoding.b64encode("hello world", "std"); if (encoded !== correct) { throw new Error("Encoding mismatch: " + encoded); }`) @@ -88,8 +89,8 @@ func TestEncodingAlgorithms(t *testing.T) { }) t.Run("StdDec", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "hello world"; - let decoded = encoding.b64decode("aGVsbG8gd29ybGQ=", "std"); + var correct = "hello world"; + var decoded = encoding.b64decode("aGVsbG8gd29ybGQ=", "std"); if (decoded !== correct) { throw new Error("Decoding mismatch: " + decoded); }`) @@ -97,8 +98,8 @@ func TestEncodingAlgorithms(t *testing.T) { }) t.Run("RawStdEnc", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "aGVsbG8gd29ybGQ"; - let encoded = encoding.b64encode("hello world", "rawstd"); + var correct = "aGVsbG8gd29ybGQ"; + var encoded = encoding.b64encode("hello world", "rawstd"); if (encoded !== correct) { throw new Error("Encoding mismatch: " + encoded); }`) @@ -106,8 +107,8 @@ func TestEncodingAlgorithms(t *testing.T) { }) t.Run("RawStdDec", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "hello world"; - let decoded = encoding.b64decode("aGVsbG8gd29ybGQ", "rawstd"); + var correct = "hello world"; + var decoded = encoding.b64decode("aGVsbG8gd29ybGQ", "rawstd"); if (decoded !== correct) { throw new Error("Decoding mismatch: " + decoded); }`) @@ -115,8 +116,8 @@ func TestEncodingAlgorithms(t *testing.T) { }) t.Run("URLEnc", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "5bCP6aO85by-Li4="; - let encoded = encoding.b64encode("小飼弾..", "url"); + var correct = "5bCP6aO85by-Li4="; + var encoded = 
encoding.b64encode("小飼弾..", "url"); if (encoded !== correct) { throw new Error("Encoding mismatch: " + encoded); }`) @@ -124,8 +125,8 @@ func TestEncodingAlgorithms(t *testing.T) { }) t.Run("URLDec", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "小飼弾.."; - let decoded = encoding.b64decode("5bCP6aO85by-Li4=", "url"); + var correct = "小飼弾.."; + var decoded = encoding.b64decode("5bCP6aO85by-Li4=", "url"); if (decoded !== correct) { throw new Error("Decoding mismatch: " + decoded); }`) @@ -133,8 +134,8 @@ func TestEncodingAlgorithms(t *testing.T) { }) t.Run("RawURLEnc", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "5bCP6aO85by-Li4"; - let encoded = encoding.b64encode("小飼弾..", "rawurl"); + var correct = "5bCP6aO85by-Li4"; + var encoded = encoding.b64encode("小飼弾..", "rawurl"); if (encoded !== correct) { throw new Error("Encoding mismatch: " + encoded); }`) @@ -142,8 +143,8 @@ func TestEncodingAlgorithms(t *testing.T) { }) t.Run("RawURLDec", func(t *testing.T) { _, err := common.RunString(rt, ` - const correct = "小飼弾.."; - let decoded = encoding.b64decode("5bCP6aO85by-Li4", "rawurl"); + var correct = "小飼弾.."; + var decoded = encoding.b64decode("5bCP6aO85by-Li4", "rawurl"); if (decoded !== correct) { throw new Error("Decoding mismatch: " + decoded); }`) diff --git a/js/modules/k6/html/element.go b/js/modules/k6/html/element.go index 96770512a7c..65c9f8c916d 100644 --- a/js/modules/k6/html/element.go +++ b/js/modules/k6/html/element.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package html import ( diff --git a/js/modules/k6/html/element_test.go b/js/modules/k6/html/element_test.go index 48120e3c581..0f0d1704548 100644 --- a/js/modules/k6/html/element_test.go +++ b/js/modules/k6/html/element_test.go @@ -25,8 +25,9 @@ import ( "testing" "github.com/dop251/goja" - "github.com/loadimpact/k6/js/common" "github.com/stretchr/testify/assert" + + "github.com/loadimpact/k6/js/common" ) const testHTMLElem = ` @@ -62,7 +63,7 @@ func TestElement(t *testing.T) { rt.Set("html", common.Bind(rt, &HTML{}, &ctx)) // compileProtoElem() - _, err := common.RunString(rt, `let doc = html.parseHTML(src)`) + _, err := common.RunString(rt, `var doc = html.parseHTML(src)`) assert.NoError(t, err) assert.IsType(t, Selection{}, rt.Get("doc").Export()) diff --git a/js/modules/k6/html/elements.go b/js/modules/k6/html/elements.go index b6426ed7d10..a1b1f344caa 100644 --- a/js/modules/k6/html/elements.go +++ b/js/modules/k6/html/elements.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package html import ( diff --git a/js/modules/k6/html/elements_gen_test.go b/js/modules/k6/html/elements_gen_test.go index 5bc77255a61..5065755c6ce 100644 --- a/js/modules/k6/html/elements_gen_test.go +++ b/js/modules/k6/html/elements_gen_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package html import ( @@ -5,8 +25,9 @@ import ( "testing" "github.com/dop251/goja" - "github.com/loadimpact/k6/js/common" "github.com/stretchr/testify/assert" + + "github.com/loadimpact/k6/js/common" ) var textTests = []struct { @@ -298,7 +319,7 @@ var urlTests = []struct { } const testGenElems = ` - + @@ -390,7 +411,7 @@ func TestGenElements(t *testing.T) { rt.Set("src", testGenElems) rt.Set("html", common.Bind(rt, &HTML{}, &ctx)) - _, err := common.RunString(rt, "let doc = html.parseHTML(src)") + _, err := common.RunString(rt, "var doc = html.parseHTML(src)") assert.NoError(t, err) assert.IsType(t, Selection{}, rt.Get("doc").Export()) diff --git a/js/modules/k6/html/elements_test.go b/js/modules/k6/html/elements_test.go index 9c212b577b8..a8aec7e15eb 100644 --- a/js/modules/k6/html/elements_test.go +++ b/js/modules/k6/html/elements_test.go @@ -25,8 +25,9 @@ import ( "testing" "github.com/dop251/goja" - "github.com/loadimpact/k6/js/common" "github.com/stretchr/testify/assert" + + "github.com/loadimpact/k6/js/common" ) const testHTMLElems = ` @@ -43,7 +44,7 @@ const testHTMLElems = ` 6 - +
@@ -92,7 +93,7 @@ func TestElements(t *testing.T) { rt.Set("src", testHTMLElems) rt.Set("html", common.Bind(rt, &HTML{}, &ctx)) - _, err := common.RunString(rt, `let doc = html.parseHTML(src)`) + _, err := common.RunString(rt, `var doc = html.parseHTML(src)`) assert.NoError(t, err) assert.IsType(t, Selection{}, rt.Get("doc").Export()) diff --git a/js/modules/k6/html/gen/gen_elements.go b/js/modules/k6/html/gen/gen_elements.go index 5b9098c2aa2..45b8b4b06ae 100644 --- a/js/modules/k6/html/gen/gen_elements.go +++ b/js/modules/k6/html/gen/gen_elements.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package main import ( diff --git a/js/modules/k6/html/html.go b/js/modules/k6/html/html.go index 0eec2eade45..f5efc2e11af 100644 --- a/js/modules/k6/html/html.go +++ b/js/modules/k6/html/html.go @@ -27,9 +27,10 @@ import ( "github.com/PuerkitoBio/goquery" "github.com/dop251/goja" - "github.com/loadimpact/k6/js/common" "github.com/pkg/errors" gohtml "golang.org/x/net/html" + + "github.com/loadimpact/k6/js/common" ) type HTML struct{} diff --git a/js/modules/k6/html/html_test.go b/js/modules/k6/html/html_test.go index 3be99a7b13f..52d1bf16296 100644 --- a/js/modules/k6/html/html_test.go +++ b/js/modules/k6/html/html_test.go @@ -25,8 +25,9 @@ import ( "testing" "github.com/dop251/goja" - "github.com/loadimpact/k6/js/common" "github.com/stretchr/testify/assert" + + "github.com/loadimpact/k6/js/common" ) const testHTML = ` @@ -72,7 +73,7 @@ func TestParseHTML(t *testing.T) { // TODO: I literally cannot think of a snippet that makes goquery error. // I'm not sure if it's even possible without like, an invalid reader or something, which would // be impossible to cause from the JS side. 
- _, err := common.RunString(rt, `let doc = html.parseHTML(src)`) + _, err := common.RunString(rt, `var doc = html.parseHTML(src)`) assert.NoError(t, err) assert.IsType(t, Selection{}, rt.Get("doc").Export()) diff --git a/js/modules/k6/html/serialize_test.go b/js/modules/k6/html/serialize_test.go index e8f1c77f7b1..82b06a9ee1d 100644 --- a/js/modules/k6/html/serialize_test.go +++ b/js/modules/k6/html/serialize_test.go @@ -25,8 +25,9 @@ import ( "testing" "github.com/dop251/goja" - "github.com/loadimpact/k6/js/common" "github.com/stretchr/testify/assert" + + "github.com/loadimpact/k6/js/common" ) const testSerializeHTML = ` @@ -74,7 +75,7 @@ func TestSerialize(t *testing.T) { rt.Set("src", testSerializeHTML) rt.Set("html", common.Bind(rt, New(), &ctx)) - _, err := common.RunString(rt, `let doc = html.parseHTML(src)`) + _, err := common.RunString(rt, `var doc = html.parseHTML(src)`) assert.NoError(t, err) assert.IsType(t, Selection{}, rt.Get("doc").Export()) diff --git a/js/modules/k6/html/util.go b/js/modules/k6/html/util.go index a60dec7a48c..f0cf35f23dc 100644 --- a/js/modules/k6/html/util.go +++ b/js/modules/k6/html/util.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package html import ( diff --git a/js/modules/k6/http/cookiejar.go b/js/modules/k6/http/cookiejar.go index 59e4beb9a8e..aad7b695390 100644 --- a/js/modules/k6/http/cookiejar.go +++ b/js/modules/k6/http/cookiejar.go @@ -29,8 +29,9 @@ import ( "time" "github.com/dop251/goja" - "github.com/loadimpact/k6/js/common" "github.com/pkg/errors" + + "github.com/loadimpact/k6/js/common" ) // HTTPCookieJar is cookiejar.Jar wrapper to be used in js scripts diff --git a/js/modules/k6/http/http_test.go b/js/modules/k6/http/http_test.go index f81c9ff5714..ee1c43f44fd 100644 --- a/js/modules/k6/http/http_test.go +++ b/js/modules/k6/http/http_test.go @@ -24,10 +24,11 @@ import ( "testing" "github.com/dop251/goja" - "github.com/loadimpact/k6/js/common" - "github.com/loadimpact/k6/lib/netext/httpext" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/loadimpact/k6/js/common" + "github.com/loadimpact/k6/lib/netext/httpext" ) func TestTagURL(t *testing.T) { @@ -47,7 +48,7 @@ func TestTagURL(t *testing.T) { t.Run("expr="+expr, func(t *testing.T) { tag, err := httpext.NewURL(data.u, data.n) require.NoError(t, err) - v, err := common.RunString(rt, "http.url`"+expr+"`") + v, err := common.RunES6String(rt, "http.url`"+expr+"`") if assert.NoError(t, err) { assert.Equal(t, tag, v.Export()) } diff --git a/js/modules/k6/http/http_url.go b/js/modules/k6/http/http_url.go index be1c46e1ae9..49ac36cede3 100644 --- a/js/modules/k6/http/http_url.go +++ b/js/modules/k6/http/http_url.go @@ -24,6 +24,7 @@ import ( "fmt" "github.com/dop251/goja" + "github.com/loadimpact/k6/lib/netext/httpext" ) diff --git a/js/modules/k6/http/request.go b/js/modules/k6/http/request.go index 45cbaa6a63e..0f7a0cdd9ad 100644 --- a/js/modules/k6/http/request.go +++ b/js/modules/k6/http/request.go @@ -33,10 +33,11 @@ import ( "time" "github.com/dop251/goja" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/lib" 
"github.com/loadimpact/k6/lib/netext/httpext" - null "gopkg.in/guregu/null.v3" ) // ErrHTTPForbiddenInInitContext is used when a http requests was made in the init context diff --git a/js/modules/k6/http/request_test.go b/js/modules/k6/http/request_test.go index 87564f1ee45..c1b4a04c1de 100644 --- a/js/modules/k6/http/request_test.go +++ b/js/modules/k6/http/request_test.go @@ -40,19 +40,20 @@ import ( "github.com/andybalholm/brotli" "github.com/dop251/goja" "github.com/klauspost/compress/zstd" - "github.com/loadimpact/k6/js/common" - "github.com/loadimpact/k6/lib" - "github.com/loadimpact/k6/lib/metrics" - "github.com/loadimpact/k6/lib/testutils" - "github.com/loadimpact/k6/lib/testutils/httpmultibin" - "github.com/loadimpact/k6/stats" "github.com/mccutchen/go-httpbin/httpbin" "github.com/oxtoacart/bpool" "github.com/sirupsen/logrus" logtest "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/js/common" + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/metrics" + "github.com/loadimpact/k6/lib/testutils" + "github.com/loadimpact/k6/lib/testutils/httpmultibin" + "github.com/loadimpact/k6/stats" ) func assertRequestMetricsEmitted(t *testing.T, sampleContainers []stats.SampleContainer, method, url, name string, status int, group string) { @@ -137,6 +138,7 @@ func newRuntime( Transport: tb.HTTPTransport, BPool: bpool.NewBufferPool(1), Samples: samples, + Tags: map[string]string{"group": root.Path}, } ctx := new(context.Context) @@ -164,7 +166,7 @@ func TestRequestAndBatch(t *testing.T) { t.Run("Redirects", func(t *testing.T) { t.Run("tracing", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.get("HTTPBIN_URL/redirect/9"); + var res = http.get("HTTPBIN_URL/redirect/9"); `)) assert.NoError(t, err) bufSamples := stats.GetBufferedSamples(samples) @@ -188,7 +190,7 @@ func 
TestRequestAndBatch(t *testing.T) { }) t.Run("11", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.get("HTTPBIN_URL/redirect/11"); + var res = http.get("HTTPBIN_URL/redirect/11"); if (res.status != 302) { throw new Error("wrong status: " + res.status) } if (res.url != "HTTPBIN_URL/relative-redirect/1") { throw new Error("incorrect URL: " + res.url) } if (res.headers["Location"] != "/get") { throw new Error("incorrect Location header: " + res.headers["Location"]) } @@ -204,7 +206,7 @@ func TestRequestAndBatch(t *testing.T) { state.Options.MaxRedirects = null.NewInt(10, false) _, err := common.RunString(rt, sr(` - let res = http.get("HTTPBIN_URL/redirect/11"); + var res = http.get("HTTPBIN_URL/redirect/11"); if (res.status != 302) { throw new Error("wrong status: " + res.status) } if (res.url != "HTTPBIN_URL/relative-redirect/1") { throw new Error("incorrect URL: " + res.url) } if (res.headers["Location"] != "/get") { throw new Error("incorrect Location header: " + res.headers["Location"]) } @@ -221,7 +223,7 @@ func TestRequestAndBatch(t *testing.T) { }) t.Run("requestScopeRedirects", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.get("HTTPBIN_URL/redirect/1", {redirects: 3}); + var res = http.get("HTTPBIN_URL/redirect/1", {redirects: 3}); if (res.status != 200) { throw new Error("wrong status: " + res.status) } if (res.url != "HTTPBIN_URL/get") { throw new Error("incorrect URL: " + res.url) } `)) @@ -229,7 +231,7 @@ func TestRequestAndBatch(t *testing.T) { }) t.Run("requestScopeNoRedirects", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.get("HTTPBIN_URL/redirect/1", {redirects: 0}); + var res = http.get("HTTPBIN_URL/redirect/1", {redirects: 0}); if (res.status != 302) { throw new Error("wrong status: " + res.status) } if (res.url != "HTTPBIN_URL/redirect/1") { throw new Error("incorrect URL: " + res.url) } if (res.headers["Location"] != "/get") { throw new Error("incorrect Location 
header: " + res.headers["Location"]) } @@ -245,7 +247,7 @@ func TestRequestAndBatch(t *testing.T) { http.Redirect(w, r, sr("HTTPBIN_URL/post"), http.StatusPermanentRedirect) })) _, err := common.RunString(rt, sr(` - let res = http.post("HTTPBIN_URL/post-redirect", "pesho", {redirects: 1}); + var res = http.post("HTTPBIN_URL/post-redirect", "pesho", {redirects: 1}); if (res.status != 200) { throw new Error("wrong status: " + res.status) } if (res.url != "HTTPBIN_URL/post") { throw new Error("incorrect URL: " + res.url) } @@ -289,7 +291,7 @@ func TestRequestAndBatch(t *testing.T) { }) t.Run("UserAgent", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.get("HTTPBIN_URL/user-agent"); + var res = http.get("HTTPBIN_URL/user-agent"); if (res.json()['user-agent'] != "TestUserAgent") { throw new Error("incorrect user agent: " + res.json()['user-agent']) } @@ -298,7 +300,7 @@ func TestRequestAndBatch(t *testing.T) { t.Run("Override", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.get("HTTPBIN_URL/user-agent", { + var res = http.get("HTTPBIN_URL/user-agent", { headers: { "User-Agent": "OtherUserAgent" }, }); if (res.json()['user-agent'] != "OtherUserAgent") { @@ -311,7 +313,7 @@ func TestRequestAndBatch(t *testing.T) { t.Run("Compression", func(t *testing.T) { t.Run("gzip", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.get("HTTPSBIN_IP_URL/gzip"); + var res = http.get("HTTPSBIN_IP_URL/gzip"); if (res.json()['gzipped'] != true) { throw new Error("unexpected body data: " + res.json()['gzipped']) } @@ -320,7 +322,7 @@ func TestRequestAndBatch(t *testing.T) { }) t.Run("deflate", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.get("HTTPBIN_URL/deflate"); + var res = http.get("HTTPBIN_URL/deflate"); if (res.json()['deflated'] != true) { throw new Error("unexpected body data: " + res.json()['deflated']) } @@ -329,7 +331,7 @@ func TestRequestAndBatch(t *testing.T) { }) 
t.Run("zstd", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.get("HTTPSBIN_IP_URL/zstd"); + var res = http.get("HTTPSBIN_IP_URL/zstd"); if (res.json()['compression'] != 'zstd') { throw new Error("unexpected body data: " + res.json()['compression']) } @@ -338,7 +340,7 @@ func TestRequestAndBatch(t *testing.T) { }) t.Run("brotli", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.get("HTTPSBIN_IP_URL/brotli"); + var res = http.get("HTTPSBIN_IP_URL/brotli"); if (res.json()['compression'] != 'br') { throw new Error("unexpected body data: " + res.json()['compression']) } @@ -347,7 +349,7 @@ func TestRequestAndBatch(t *testing.T) { }) t.Run("zstd-br", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.get("HTTPSBIN_IP_URL/zstd-br"); + var res = http.get("HTTPSBIN_IP_URL/zstd-br"); if (res.json()['compression'] != 'zstd, br') { throw new Error("unexpected compression: " + res.json()['compression']) } @@ -363,7 +365,7 @@ func TestRequestAndBatch(t *testing.T) { })) _, err := common.RunString(rt, sr(` - let res = http.get("HTTPBIN_URL/customcompression"); + var res = http.get("HTTPBIN_URL/customcompression"); if (res.json()["custom"] != true) { throw new Error("unexpected body data: " + res.body) } @@ -374,8 +376,8 @@ func TestRequestAndBatch(t *testing.T) { t.Run("CompressionWithAcceptEncodingHeader", func(t *testing.T) { t.Run("gzip", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let params = { headers: { "Accept-Encoding": "gzip" } }; - let res = http.get("HTTPBIN_URL/gzip", params); + var params = { headers: { "Accept-Encoding": "gzip" } }; + var res = http.get("HTTPBIN_URL/gzip", params); if (res.json()['gzipped'] != true) { throw new Error("unexpected body data: " + res.json()['gzipped']) } @@ -384,8 +386,8 @@ func TestRequestAndBatch(t *testing.T) { }) t.Run("deflate", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let params = { headers: { "Accept-Encoding": "deflate" 
} }; - let res = http.get("HTTPBIN_URL/deflate", params); + var params = { headers: { "Accept-Encoding": "deflate" } }; + var res = http.get("HTTPBIN_URL/deflate", params); if (res.json()['deflated'] != true) { throw new Error("unexpected body data: " + res.json()['deflated']) } @@ -410,7 +412,7 @@ func TestRequestAndBatch(t *testing.T) { t.Run("HTTP/2", func(t *testing.T) { stats.GetBufferedSamples(samples) // Clean up buffered samples from previous tests _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTP2BIN_URL/get"); + var res = http.request("GET", "HTTP2BIN_URL/get"); if (res.status != 200) { throw new Error("wrong status: " + res.status) } if (res.proto != "HTTP/2.0") { throw new Error("wrong proto: " + res.proto) } `)) @@ -442,7 +444,7 @@ func TestRequestAndBatch(t *testing.T) { for _, versionTest := range tlsVersionTests { t.Run(versionTest.Name, func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - let res = http.get("%s"); + var res = http.get("%s"); if (res.tls_version != %s) { throw new Error("wrong TLS version: " + res.tls_version); } `, versionTest.URL, versionTest.Version)) assert.NoError(t, err) @@ -458,7 +460,7 @@ func TestRequestAndBatch(t *testing.T) { for _, cipherSuiteTest := range tlsCipherSuiteTests { t.Run(cipherSuiteTest.Name, func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(` - let res = http.get("%s"); + var res = http.get("%s"); if (res.tls_cipher_suite != "%s") { throw new Error("wrong TLS cipher suite: " + res.tls_cipher_suite); } `, cipherSuiteTest.URL, cipherSuiteTest.CipherSuite)) assert.NoError(t, err) @@ -467,7 +469,7 @@ func TestRequestAndBatch(t *testing.T) { } t.Run("ocsp_stapled_good", func(t *testing.T) { _, err := common.RunString(rt, ` - let res = http.request("GET", "https://www.microsoft.com/"); + var res = http.request("GET", "https://www.microsoft.com/"); if (res.ocsp.status != http.OCSP_STATUS_GOOD) { throw new Error("wrong ocsp stapled response status: " + 
res.ocsp.status); } `) assert.NoError(t, err) @@ -494,7 +496,7 @@ func TestRequestAndBatch(t *testing.T) { defer hook.Reset() _, err := common.RunString(rt, ` - let res = http.request("", "", { throw: false }); + var res = http.request("", "", { throw: false }); throw new Error(res.error); `) require.Error(t, err) @@ -517,7 +519,7 @@ func TestRequestAndBatch(t *testing.T) { for _, literal := range []string{`undefined`, `null`} { t.Run(literal, func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(sr(` - let res = http.request("GET", "HTTPBIN_URL/headers", null, %s); + var res = http.request("GET", "HTTPBIN_URL/headers", null, %s); if (res.status != 200) { throw new Error("wrong status: " + res.status); } `), literal)) assert.NoError(t, err) @@ -531,11 +533,11 @@ func TestRequestAndBatch(t *testing.T) { assert.NoError(t, err) state.CookieJar = cookieJar _, err = common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/cookies/set?key=value", null, { redirects: 0 }); + var res = http.request("GET", "HTTPBIN_URL/cookies/set?key=value", null, { redirects: 0 }); if (res.cookies.key[0].value != "value") { throw new Error("wrong cookie value: " + res.cookies.key[0].value); } - const props = ["name", "value", "domain", "path", "expires", "max_age", "secure", "http_only"]; - let cookie = res.cookies.key[0]; - for (let i = 0; i < props.length; i++) { + var props = ["name", "value", "domain", "path", "expires", "max_age", "secure", "http_only"]; + var cookie = res.cookies.key[0]; + for (var i = 0; i < props.length; i++) { if (cookie[props[i]] === undefined) { throw new Error("cookie property not found: " + props[i]); } @@ -553,12 +555,12 @@ func TestRequestAndBatch(t *testing.T) { assert.NoError(t, err) state.CookieJar = cookieJar _, err = common.RunString(rt, sr(` - let jar = http.cookieJar(); + var jar = http.cookieJar(); jar.set("HTTPBIN_URL/cookies", "key", "value"); - let res = http.request("GET", "HTTPBIN_URL/cookies", null, { cookies: { key2: 
"value2" } }); + var res = http.request("GET", "HTTPBIN_URL/cookies", null, { cookies: { key2: "value2" } }); if (res.json().key != "value") { throw new Error("wrong cookie value: " + res.json().key); } if (res.json().key2 != "value2") { throw new Error("wrong cookie value: " + res.json().key2); } - let jarCookies = jar.cookiesForURL("HTTPBIN_URL/cookies"); + var jarCookies = jar.cookiesForURL("HTTPBIN_URL/cookies"); if (jarCookies.key[0] != "value") { throw new Error("wrong cookie value in jar"); } if (jarCookies.key2 != undefined) { throw new Error("unexpected cookie in jar"); } `)) @@ -571,10 +573,10 @@ func TestRequestAndBatch(t *testing.T) { assert.NoError(t, err) state.CookieJar = cookieJar _, err = common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/cookies", null, { cookies: { key: "value" } }); + var res = http.request("GET", "HTTPBIN_URL/cookies", null, { cookies: { key: "value" } }); if (res.json().key != "value") { throw new Error("wrong cookie value: " + res.json().key); } - let jar = http.cookieJar(); - let jarCookies = jar.cookiesForURL("HTTPBIN_URL/cookies"); + var jar = http.cookieJar(); + var jarCookies = jar.cookiesForURL("HTTPBIN_URL/cookies"); if (jarCookies.key != undefined) { throw new Error("unexpected cookie in jar"); } `)) assert.NoError(t, err) @@ -586,11 +588,11 @@ func TestRequestAndBatch(t *testing.T) { assert.NoError(t, err) state.CookieJar = cookieJar _, err = common.RunString(rt, sr(` - let jar = http.cookieJar(); + var jar = http.cookieJar(); jar.set("HTTPBIN_URL/cookies", "key", "value"); - let res = http.request("GET", "HTTPBIN_URL/cookies", null, { cookies: { key: { value: "replaced", replace: true } } }); + var res = http.request("GET", "HTTPBIN_URL/cookies", null, { cookies: { key: { value: "replaced", replace: true } } }); if (res.json().key != "replaced") { throw new Error("wrong cookie value: " + res.json().key); } - let jarCookies = jar.cookiesForURL("HTTPBIN_URL/cookies"); + var jarCookies = 
jar.cookiesForURL("HTTPBIN_URL/cookies"); if (jarCookies.key[0] != "value") { throw new Error("wrong cookie value in jar"); } `)) assert.NoError(t, err) @@ -615,7 +617,7 @@ func TestRequestAndBatch(t *testing.T) { require.NoError(t, err) state.CookieJar = cookieJar _, err = common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/redirect-to?url=HTTPSBIN_URL/set-cookie-without-redirect"); + var res = http.request("GET", "HTTPBIN_URL/redirect-to?url=HTTPSBIN_URL/set-cookie-without-redirect"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } `)) require.NoError(t, err) @@ -641,7 +643,7 @@ func TestRequestAndBatch(t *testing.T) { require.NoError(t, err) state.CookieJar = cookieJar _, err = common.RunString(rt, sr(` - let res = http.request("GET", "HTTPSBIN_URL/cookies/set?key=value"); + var res = http.request("GET", "HTTPSBIN_URL/cookies/set?key=value"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } `)) require.NoError(t, err) @@ -682,7 +684,7 @@ func TestRequestAndBatch(t *testing.T) { })) _, err = common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_IP_URL/redirect-to?url=HTTPSBIN_URL/set-cookie-and-redirect"); + var res = http.request("GET", "HTTPBIN_IP_URL/redirect-to?url=HTTPSBIN_URL/set-cookie-and-redirect"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } `)) require.NoError(t, err) @@ -717,9 +719,9 @@ func TestRequestAndBatch(t *testing.T) { assert.NoError(t, err) state.CookieJar = cookieJar _, err = common.RunString(rt, sr(` - let jar = http.cookieJar(); + var jar = http.cookieJar(); jar.set("HTTPBIN_URL/cookies", "key", "value", { domain: "HTTPBIN_DOMAIN" }); - let res = http.request("GET", "HTTPBIN_URL/cookies"); + var res = http.request("GET", "HTTPBIN_URL/cookies"); if (res.json().key != "value") { throw new Error("wrong cookie value 1: " + res.json().key); } @@ -741,9 +743,9 @@ func TestRequestAndBatch(t *testing.T) { assert.NoError(t, err) 
state.CookieJar = cookieJar _, err = common.RunString(rt, sr(` - let jar = http.cookieJar(); + var jar = http.cookieJar(); jar.set("HTTPBIN_URL/cookies", "key", "value", { path: "/cookies" }); - let res = http.request("GET", "HTTPBIN_URL/cookies"); + var res = http.request("GET", "HTTPBIN_URL/cookies"); if (res.json().key != "value") { throw new Error("wrong cookie value: " + res.json().key); } @@ -765,9 +767,9 @@ func TestRequestAndBatch(t *testing.T) { assert.NoError(t, err) state.CookieJar = cookieJar _, err = common.RunString(rt, sr(` - let jar = http.cookieJar(); + var jar = http.cookieJar(); jar.set("HTTPBIN_URL/cookies", "key", "value", { expires: "Sun, 24 Jul 1983 17:01:02 GMT" }); - let res = http.request("GET", "HTTPBIN_URL/cookies"); + var res = http.request("GET", "HTTPBIN_URL/cookies"); if (res.json().key != undefined) { throw new Error("cookie 'key' unexpectedly found"); } @@ -786,9 +788,9 @@ func TestRequestAndBatch(t *testing.T) { assert.NoError(t, err) state.CookieJar = cookieJar _, err = common.RunString(rt, sr(` - let jar = http.cookieJar(); + var jar = http.cookieJar(); jar.set("HTTPSBIN_IP_URL/cookies", "key", "value", { secure: true }); - let res = http.request("GET", "HTTPSBIN_IP_URL/cookies"); + var res = http.request("GET", "HTTPSBIN_IP_URL/cookies"); if (res.json().key != "value") { throw new Error("wrong cookie value: " + res.json().key); } @@ -802,12 +804,12 @@ func TestRequestAndBatch(t *testing.T) { assert.NoError(t, err) state.CookieJar = cookieJar _, err = common.RunString(rt, sr(` - let jar = new http.CookieJar(); + var jar = new http.CookieJar(); jar.set("HTTPBIN_URL/cookies", "key", "value"); - let res = http.request("GET", "HTTPBIN_URL/cookies", null, { cookies: { key2: "value2" }, jar: jar }); + var res = http.request("GET", "HTTPBIN_URL/cookies", null, { cookies: { key2: "value2" }, jar: jar }); if (res.json().key != "value") { throw new Error("wrong cookie value: " + res.json().key); } if (res.json().key2 != "value2") { throw 
new Error("wrong cookie value: " + res.json().key2); } - let jarCookies = jar.cookiesForURL("HTTPBIN_URL/cookies"); + var jarCookies = jar.cookiesForURL("HTTPBIN_URL/cookies"); if (jarCookies.key[0] != "value") { throw new Error("wrong cookie value in jar: " + jarCookies.key[0]); } if (jarCookies.key2 != undefined) { throw new Error("unexpected cookie in jar"); } `)) @@ -822,7 +824,7 @@ func TestRequestAndBatch(t *testing.T) { urlExpected := sr("http://****:****@HTTPBIN_IP:HTTPBIN_PORT/basic-auth/bob/pass") _, err := common.RunString(rt, fmt.Sprintf(` - let res = http.request("GET", "%s", null, {}); + var res = http.request("GET", "%s", null, {}); if (res.status != 200) { throw new Error("wrong status: " + res.status); } `, url)) assert.NoError(t, err) @@ -834,7 +836,7 @@ func TestRequestAndBatch(t *testing.T) { urlExpected := sr("http://****:****@HTTPBIN_IP:HTTPBIN_PORT/digest-auth/auth/bob/pass") _, err := common.RunString(rt, fmt.Sprintf(` - let res = http.request("GET", "%s", null, { auth: "digest" }); + var res = http.request("GET", "%s", null, { auth: "digest" }); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (res.error_code != 0) { throw new Error("wrong error code: " + res.error_code); } `, url)) @@ -850,7 +852,7 @@ func TestRequestAndBatch(t *testing.T) { url := sr("http://bob:pass@HTTPBIN_IP:HTTPBIN_PORT/digest-auth/failure") _, err := common.RunString(rt, fmt.Sprintf(` - let res = http.request("GET", "%s", null, { auth: "digest", timeout: 1, throw: false }); + var res = http.request("GET", "%s", null, { auth: "digest", timeout: 1, throw: false }); `, url)) assert.NoError(t, err) }) @@ -861,7 +863,7 @@ func TestRequestAndBatch(t *testing.T) { for _, literal := range []string{`null`, `undefined`} { t.Run(literal, func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(sr(` - let res = http.request("GET", "HTTPBIN_URL/headers", null, { headers: %s }); + var res = http.request("GET", "HTTPBIN_URL/headers", null, { 
headers: %s }); if (res.status != 200) { throw new Error("wrong status: " + res.status); } `), literal)) assert.NoError(t, err) @@ -871,7 +873,7 @@ func TestRequestAndBatch(t *testing.T) { t.Run("object", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/headers", null, { + var res = http.request("GET", "HTTPBIN_URL/headers", null, { headers: { "X-My-Header": "value" }, }); if (res.status != 200) { throw new Error("wrong status: " + res.status); } @@ -883,7 +885,7 @@ func TestRequestAndBatch(t *testing.T) { t.Run("Host", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/headers", null, { + var res = http.request("GET", "HTTPBIN_URL/headers", null, { headers: { "Host": "HTTPBIN_DOMAIN" }, }); if (res.status != 200) { throw new Error("wrong status: " + res.status); } @@ -898,7 +900,7 @@ func TestRequestAndBatch(t *testing.T) { for _, literal := range []string{`null`, `undefined`} { t.Run(literal, func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(sr(` - let res = http.request("GET", "HTTPBIN_URL/headers", null, { tags: %s }); + var res = http.request("GET", "HTTPBIN_URL/headers", null, { tags: %s }); if (res.status != 200) { throw new Error("wrong status: " + res.status); } `), literal)) assert.NoError(t, err) @@ -908,7 +910,7 @@ func TestRequestAndBatch(t *testing.T) { t.Run("object", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/headers", null, { tags: { tag: "value" } }); + var res = http.request("GET", "HTTPBIN_URL/headers", null, { tags: { tag: "value" } }); if (res.status != 200) { throw new Error("wrong status: " + res.status); } `)) assert.NoError(t, err) @@ -924,12 +926,12 @@ func TestRequestAndBatch(t *testing.T) { }) t.Run("tags-precedence", func(t *testing.T) { - oldOpts := state.Options - defer func() { state.Options = oldOpts }() - state.Options.RunTags = 
stats.IntoSampleTags(&map[string]string{"runtag1": "val1", "runtag2": "val2"}) + oldTags := state.Tags + defer func() { state.Tags = oldTags }() + state.Tags = map[string]string{"runtag1": "val1", "runtag2": "val2"} _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/headers", null, { tags: { method: "test", name: "myName", runtag1: "fromreq" } }); + var res = http.request("GET", "HTTPBIN_URL/headers", null, { tags: { method: "test", name: "myName", runtag1: "fromreq" } }); if (res.status != 200) { throw new Error("wrong status: " + res.status); } `)) assert.NoError(t, err) @@ -961,7 +963,7 @@ func TestRequestAndBatch(t *testing.T) { t.Run("GET", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.get("HTTPBIN_URL/get?a=1&b=2"); + var res = http.get("HTTPBIN_URL/get?a=1&b=2"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (res.json().args.a != "1") { throw new Error("wrong ?a: " + res.json().args.a); } if (res.json().args.b != "2") { throw new Error("wrong ?b: " + res.json().args.b); } @@ -970,10 +972,10 @@ func TestRequestAndBatch(t *testing.T) { assertRequestMetricsEmitted(t, stats.GetBufferedSamples(samples), "GET", sr("HTTPBIN_URL/get?a=1&b=2"), "", 200, "") t.Run("Tagged", func(t *testing.T) { - _, err := common.RunString(rt, ` - let a = "1"; - let b = "2"; - let res = http.get(http.url`+"`"+sr(`HTTPBIN_URL/get?a=${a}&b=${b}`)+"`"+`); + _, err := common.RunES6String(rt, ` + var a = "1"; + var b = "2"; + var res = http.get(http.url`+"`"+sr(`HTTPBIN_URL/get?a=${a}&b=${b}`)+"`"+`); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (res.json().args.a != a) { throw new Error("wrong ?a: " + res.json().args.a); } if (res.json().args.b != b) { throw new Error("wrong ?b: " + res.json().args.b); } @@ -984,7 +986,7 @@ func TestRequestAndBatch(t *testing.T) { }) t.Run("HEAD", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = 
http.head("HTTPBIN_URL/get?a=1&b=2"); + var res = http.head("HTTPBIN_URL/get?a=1&b=2"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (res.body.length != 0) { throw new Error("HEAD responses shouldn't have a body"); } if (!res.headers["Content-Length"]) { throw new Error("Missing or invalid Content-Length header!"); } @@ -995,7 +997,7 @@ func TestRequestAndBatch(t *testing.T) { t.Run("OPTIONS", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.options("HTTPBIN_URL/?a=1&b=2"); + var res = http.options("HTTPBIN_URL/?a=1&b=2"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (!res.headers["Access-Control-Allow-Methods"]) { throw new Error("Missing Access-Control-Allow-Methods header!"); } `)) @@ -1009,7 +1011,7 @@ func TestRequestAndBatch(t *testing.T) { // https://tools.ietf.org/html/rfc7231#section-4.3.5 t.Run("DELETE", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.del("HTTPBIN_URL/delete?test=mest"); + var res = http.del("HTTPBIN_URL/delete?test=mest"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (res.json().args.test != "mest") { throw new Error("wrong args: " + JSON.stringify(res.json().args)); } `)) @@ -1025,7 +1027,7 @@ func TestRequestAndBatch(t *testing.T) { for method, fn := range postMethods { t.Run(method, func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(sr(` - let res = http.%s("HTTPBIN_URL/%s", "data"); + var res = http.%s("HTTPBIN_URL/%s", "data"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (res.json().data != "data") { throw new Error("wrong data: " + res.json().data); } if (res.json().headers["Content-Type"]) { throw new Error("content type set: " + res.json().headers["Content-Type"]); } @@ -1035,7 +1037,7 @@ func TestRequestAndBatch(t *testing.T) { t.Run("object", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(sr(` - let res = 
http.%s("HTTPBIN_URL/%s", {a: "a", b: 2}); + var res = http.%s("HTTPBIN_URL/%s", {a: "a", b: 2}); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (res.json().form.a != "a") { throw new Error("wrong a=: " + res.json().form.a); } if (res.json().form.b != "2") { throw new Error("wrong b=: " + res.json().form.b); } @@ -1045,7 +1047,7 @@ func TestRequestAndBatch(t *testing.T) { assertRequestMetricsEmitted(t, stats.GetBufferedSamples(samples), method, sr("HTTPBIN_URL/")+strings.ToLower(method), "", 200, "") t.Run("Content-Type", func(t *testing.T) { _, err := common.RunString(rt, fmt.Sprintf(sr(` - let res = http.%s("HTTPBIN_URL/%s", {a: "a", b: 2}, {headers: {"Content-Type": "application/x-www-form-urlencoded; charset=utf-8"}}); + var res = http.%s("HTTPBIN_URL/%s", {a: "a", b: 2}, {headers: {"Content-Type": "application/x-www-form-urlencoded; charset=utf-8"}}); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (res.json().form.a != "a") { throw new Error("wrong a=: " + res.json().form.a); } if (res.json().form.b != "2") { throw new Error("wrong b=: " + res.json().form.b); } @@ -1060,16 +1062,16 @@ func TestRequestAndBatch(t *testing.T) { t.Run("Batch", func(t *testing.T) { t.Run("error", func(t *testing.T) { - _, err := common.RunString(rt, `let res = http.batch("https://somevalidurl.com");`) + _, err := common.RunString(rt, `var res = http.batch("https://somevalidurl.com");`) require.Error(t, err) }) t.Run("GET", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let reqs = [ + var reqs = [ ["GET", "HTTPBIN_URL/"], ["GET", "HTTPBIN_IP_URL/"], ]; - let res = http.batch(reqs); + var res = http.batch(reqs); for (var key in res) { if (res[key].status != 200) { throw new Error("wrong status: " + res[key].status); } if (res[key].url != reqs[key][1]) { throw new Error("wrong url: " + res[key].url); } @@ -1080,7 +1082,7 @@ func TestRequestAndBatch(t *testing.T) { assertRequestMetricsEmitted(t, bufSamples, 
"GET", sr("HTTPBIN_IP_URL/"), "", 200, "") t.Run("Tagged", func(t *testing.T) { - _, err := common.RunString(rt, sr(` + _, err := common.RunES6String(rt, sr(` let fragment = "get"; let reqs = [ ["GET", http.url`+"`"+`HTTPBIN_URL/${fragment}`+"`"+`], @@ -1099,11 +1101,11 @@ func TestRequestAndBatch(t *testing.T) { t.Run("Shorthand", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let reqs = [ + var reqs = [ "HTTPBIN_URL/", "HTTPBIN_IP_URL/", ]; - let res = http.batch(reqs); + var res = http.batch(reqs); for (var key in res) { if (res[key].status != 200) { throw new Error("wrong status: " + key + ": " + res[key].status); } if (res[key].url != reqs[key]) { throw new Error("wrong url: " + key + ": " + res[key].url); } @@ -1114,7 +1116,7 @@ func TestRequestAndBatch(t *testing.T) { assertRequestMetricsEmitted(t, bufSamples, "GET", sr("HTTPBIN_IP_URL/"), "", 200, "") t.Run("Tagged", func(t *testing.T) { - _, err := common.RunString(rt, sr(` + _, err := common.RunES6String(rt, sr(` let fragment = "get"; let reqs = [ http.url`+"`"+`HTTPBIN_URL/${fragment}`+"`"+`, @@ -1134,11 +1136,11 @@ func TestRequestAndBatch(t *testing.T) { t.Run("ObjectForm", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let reqs = [ + var reqs = [ { method: "GET", url: "HTTPBIN_URL/" }, { url: "HTTPBIN_IP_URL/", method: "GET"}, ]; - let res = http.batch(reqs); + var res = http.batch(reqs); for (var key in res) { if (res[key].status != 200) { throw new Error("wrong status: " + key + ": " + res[key].status); } if (res[key].url != reqs[key].url) { throw new Error("wrong url: " + key + ": " + res[key].url + " != " + reqs[key].url); } @@ -1151,13 +1153,13 @@ func TestRequestAndBatch(t *testing.T) { t.Run("ObjectKeys", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let reqs = { + var reqs = { shorthand: "HTTPBIN_URL/get?r=shorthand", arr: ["GET", "HTTPBIN_URL/get?r=arr", null, {tags: {name: 'arr'}}], obj1: { method: "GET", url: "HTTPBIN_URL/get?r=obj1" }, obj2: { url: 
"HTTPBIN_URL/get?r=obj2", params: {tags: {name: 'obj2'}}, method: "GET"}, }; - let res = http.batch(reqs); + var res = http.batch(reqs); for (var key in res) { if (res[key].status != 200) { throw new Error("wrong status: " + key + ": " + res[key].status); } if (res[key].json().args.r != key) { throw new Error("wrong request id: " + key); } @@ -1176,7 +1178,7 @@ func TestRequestAndBatch(t *testing.T) { rt.Set("someBinFile", []byte(testStr)) _, err := common.RunString(rt, sr(` - let reqs = [ + var reqs = [ ["POST", "HTTPBIN_URL/post", "testbody"], ["POST", "HTTPBIN_URL/post", someStrFile], ["POST", "HTTPBIN_URL/post", someBinFile], @@ -1202,7 +1204,7 @@ func TestRequestAndBatch(t *testing.T) { }, }, ]; - let res = http.batch(reqs); + var res = http.batch(reqs); for (var key in res) { if (res[key].status != 200) { throw new Error("wrong status: " + key + ": " + res[key].status); } if (res[key].json().data != "testbody" && res[key].json().form.hello != "world!") { throw new Error("wrong response for " + key + ": " + res[key].body); } @@ -1215,7 +1217,7 @@ func TestRequestAndBatch(t *testing.T) { }) t.Run("POST", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.batch([ ["POST", "HTTPBIN_URL/post", { key: "value" }] ]); + var res = http.batch([ ["POST", "HTTPBIN_URL/post", { key: "value" }] ]); for (var key in res) { if (res[key].status != 200) { throw new Error("wrong status: " + key + ": " + res[key].status); } if (res[key].json().form.key != "value") { throw new Error("wrong form: " + key + ": " + JSON.stringify(res[key].json().form)); } @@ -1225,7 +1227,7 @@ func TestRequestAndBatch(t *testing.T) { }) t.Run("PUT", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.batch([ ["PUT", "HTTPBIN_URL/put", { key: "value" }] ]); + var res = http.batch([ ["PUT", "HTTPBIN_URL/put", { key: "value" }] ]); for (var key in res) { if (res[key].status != 200) { throw new Error("wrong status: " + key + ": " + res[key].status); } if 
(res[key].json().form.key != "value") { throw new Error("wrong form: " + key + ": " + JSON.stringify(res[key].json().form)); } @@ -1238,9 +1240,9 @@ func TestRequestAndBatch(t *testing.T) { t.Run("HTTPRequest", func(t *testing.T) { t.Run("EmptyBody", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let reqUrl = "HTTPBIN_URL/cookies" - let res = http.get(reqUrl); - let jar = new http.CookieJar(); + var reqUrl = "HTTPBIN_URL/cookies" + var res = http.get(reqUrl); + var jar = new http.CookieJar(); jar.set("HTTPBIN_URL/cookies", "key", "value"); res = http.request("GET", "HTTPBIN_URL/cookies", null, { cookies: { key2: "value2" }, jar: jar }); @@ -1260,7 +1262,7 @@ func TestRequestAndBatch(t *testing.T) { }) t.Run("NonEmptyBody", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.post("HTTPBIN_URL/post", {a: "a", b: 2}, {headers: {"Content-Type": "application/x-www-form-urlencoded; charset=utf-8"}}); + var res = http.post("HTTPBIN_URL/post", {a: "a", b: 2}, {headers: {"Content-Type": "application/x-www-form-urlencoded; charset=utf-8"}}); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (res.request["body"] != "a=a&b=2") { throw new Error("http request body was not set properly: " + JSON.stringify(res.request))} `)) @@ -1268,74 +1270,6 @@ func TestRequestAndBatch(t *testing.T) { }) }) } -func TestSystemTags(t *testing.T) { - t.Parallel() - tb, state, samples, rt, _ := newRuntime(t) - defer tb.Cleanup() - - // Handple paths with custom logic - tb.Mux.HandleFunc("/wrong-redirect", func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Location", "%") - w.WriteHeader(http.StatusTemporaryRedirect) - }) - - httpGet := fmt.Sprintf(`http.get("%s");`, tb.ServerHTTP.URL) - httpsGet := fmt.Sprintf(`http.get("%s");`, tb.ServerHTTPS.URL) - - httpURL, err := url.Parse(tb.ServerHTTP.URL) - require.NoError(t, err) - - testedSystemTags := []struct{ tag, code, expVal string }{ - {"proto", httpGet, "HTTP/1.1"}, - 
{"status", httpGet, "200"}, - {"method", httpGet, "GET"}, - {"url", httpGet, tb.ServerHTTP.URL}, - {"url", httpsGet, tb.ServerHTTPS.URL}, - {"ip", httpGet, httpURL.Hostname()}, - {"name", httpGet, tb.ServerHTTP.URL}, - {"group", httpGet, ""}, - {"vu", httpGet, "0"}, - {"iter", httpGet, "0"}, - {"tls_version", httpsGet, expectedTLSVersion}, - {"ocsp_status", httpsGet, "unknown"}, - { - "error", - tb.Replacer.Replace(`http.get("http://127.0.0.1:1");`), - `dial: connection refused`, - }, - { - "error_code", - tb.Replacer.Replace(`http.get("http://127.0.0.1:1");`), - "1212", - }, - } - - state.Options.Throw = null.BoolFrom(false) - state.Options.Apply(lib.Options{TLSVersion: &lib.TLSVersions{Max: lib.TLSVersion13}}) - - for num, tc := range testedSystemTags { - tc := tc - t.Run(fmt.Sprintf("TC %d with only %s", num, tc.tag), func(t *testing.T) { - state.Options.SystemTags = stats.ToSystemTagSet([]string{tc.tag}) - - _, err := common.RunString(rt, tc.code) - assert.NoError(t, err) - - bufSamples := stats.GetBufferedSamples(samples) - assert.NotEmpty(t, bufSamples) - for _, sampleC := range bufSamples { - - for _, sample := range sampleC.GetSamples() { - assert.NotEmpty(t, sample.Tags) - for emittedTag, emittedVal := range sample.Tags.CloneTags() { - assert.Equal(t, tc.tag, emittedTag) - assert.Equal(t, tc.expVal, emittedVal) - } - } - } - }) - } -} func TestRequestCompression(t *testing.T) { t.Parallel() @@ -1448,7 +1382,7 @@ func TestRequestCompression(t *testing.T) { } expectedEncoding = strings.Join(algos, ", ") actualEncoding = expectedEncoding - _, err := common.RunString(rt, tb.Replacer.Replace(` + _, err := common.RunES6String(rt, tb.Replacer.Replace(` http.post("HTTPBIN_URL/compressed-text", `+"`"+text+"`"+`, {"compression": "`+testCase.compression+`"}); `)) if testCase.expectedError == "" { @@ -1467,7 +1401,7 @@ func TestRequestCompression(t *testing.T) { logHook.Drain() t.Run("encoding", func(t *testing.T) { - _, err := common.RunString(rt, 
tb.Replacer.Replace(` + _, err := common.RunES6String(rt, tb.Replacer.Replace(` http.post("HTTPBIN_URL/compressed-text", `+"`"+text+"`"+`, {"compression": "`+actualEncoding+`", "headers": {"Content-Encoding": "`+expectedEncoding+`"} @@ -1479,7 +1413,7 @@ func TestRequestCompression(t *testing.T) { }) t.Run("encoding and length", func(t *testing.T) { - _, err := common.RunString(rt, tb.Replacer.Replace(` + _, err := common.RunES6String(rt, tb.Replacer.Replace(` http.post("HTTPBIN_URL/compressed-text", `+"`"+text+"`"+`, {"compression": "`+actualEncoding+`", "headers": {"Content-Encoding": "`+expectedEncoding+`", @@ -1493,7 +1427,7 @@ func TestRequestCompression(t *testing.T) { expectedEncoding = actualEncoding t.Run("correct encoding", func(t *testing.T) { - _, err := common.RunString(rt, tb.Replacer.Replace(` + _, err := common.RunES6String(rt, tb.Replacer.Replace(` http.post("HTTPBIN_URL/compressed-text", `+"`"+text+"`"+`, {"compression": "`+actualEncoding+`", "headers": {"Content-Encoding": "`+actualEncoding+`"} @@ -1515,7 +1449,7 @@ func TestRequestCompression(t *testing.T) { t.Run("content-length is set", func(t *testing.T) { _, err := common.RunString(rt, tb.Replacer.Replace(` - let resp = http.post("HTTPBIN_URL/post", "0123456789"); + var resp = http.post("HTTPBIN_URL/post", "0123456789"); if (resp.json().headers["Content-Length"][0] != "10") { throw new Error("content-length not set: " + JSON.stringify(resp.json().headers)); } @@ -1571,29 +1505,29 @@ func TestResponseTypes(t *testing.T) { } _, err := common.RunString(rt, replace(` - let expText = "EXP_TEXT"; - let expBinLength = EXP_BIN_LEN; + var expText = "EXP_TEXT"; + var expBinLength = EXP_BIN_LEN; // Check default behaviour with a unicode text - let respTextImplicit = http.get("HTTPBIN_URL/get-text").body; + var respTextImplicit = http.get("HTTPBIN_URL/get-text").body; if (respTextImplicit !== expText) { throw new Error("default response body should be '" + expText + "' but was '" + respTextImplicit + 
"'"); } http.post("HTTPBIN_URL/compare-text", respTextImplicit); // Check discarding of responses - let respNone = http.get("HTTPBIN_URL/get-text", { responseType: "none" }).body; + var respNone = http.get("HTTPBIN_URL/get-text", { responseType: "none" }).body; if (respNone != null) { throw new Error("none response body should be null but was " + respNone); } // Check binary transmission of the text response as well - let respTextInBin = http.get("HTTPBIN_URL/get-text", { responseType: "binary" }).body; + var respTextInBin = http.get("HTTPBIN_URL/get-text", { responseType: "binary" }).body; // Hack to convert a utf-8 array to a JS string - let strConv = ""; + var strConv = ""; function pad(n) { return n.length < 2 ? "0" + n : n; } - for( let i = 0; i < respTextInBin.length; i++ ) { + for( var i = 0; i < respTextInBin.length; i++ ) { strConv += ( "%" + pad(respTextInBin[i].toString(16))); } strConv = decodeURIComponent(strConv); @@ -1603,11 +1537,11 @@ func TestResponseTypes(t *testing.T) { http.post("HTTPBIN_URL/compare-text", respTextInBin); // Check binary response - let respBin = http.get("HTTPBIN_URL/get-bin", { responseType: "binary" }).body; + var respBin = http.get("HTTPBIN_URL/get-bin", { responseType: "binary" }).body; if (respBin.length !== expBinLength) { throw new Error("response body length should be '" + expBinLength + "' but was '" + respBin.length + "'"); } - for( let i = 0; i < respBin.length; i++ ) { + for( var i = 0; i < respBin.length; i++ ) { if ( respBin[i] !== i%256 ) { throw new Error("expected value " + (i%256) + " to be at position " + i + " but it was " + respBin[i]); } @@ -1620,16 +1554,16 @@ func TestResponseTypes(t *testing.T) { state.Options.DiscardResponseBodies = null.BoolFrom(true) _, err = common.RunString(rt, replace(` - let expText = "EXP_TEXT"; + var expText = "EXP_TEXT"; // Check default behaviour - let respDefault = http.get("HTTPBIN_URL/get-text").body; + var respDefault = http.get("HTTPBIN_URL/get-text").body; if 
(respDefault !== null) { throw new Error("default response body should be discarded and null but was " + respDefault); } // Check explicit text response - let respTextExplicit = http.get("HTTPBIN_URL/get-text", { responseType: "text" }).body; + var respTextExplicit = http.get("HTTPBIN_URL/get-text", { responseType: "text" }).body; if (respTextExplicit !== expText) { throw new Error("text response body should be '" + expText + "' but was '" + respTextExplicit + "'"); } @@ -1684,46 +1618,46 @@ func TestErrorCodes(t *testing.T) { name: "Unroutable", expectedErrorCode: 1101, expectedErrorMsg: "lookup: no such host", - script: `let res = http.request("GET", "http://sdafsgdhfjg/");`, + script: `var res = http.request("GET", "http://sdafsgdhfjg/");`, }, { name: "404", status: 404, expectedErrorCode: 1404, - script: `let res = http.request("GET", "HTTPBIN_URL/status/404");`, + script: `var res = http.request("GET", "HTTPBIN_URL/status/404");`, }, { name: "Unroutable redirect", expectedErrorCode: 1101, expectedErrorMsg: "lookup: no such host", moreSamples: 1, - script: `let res = http.request("GET", "HTTPBIN_URL/redirect-to?url=http://dafsgdhfjg/");`, + script: `var res = http.request("GET", "HTTPBIN_URL/redirect-to?url=http://dafsgdhfjg/");`, }, { name: "Non location redirect", expectedErrorCode: 1000, expectedErrorMsg: "302 response missing Location header", - script: `let res = http.request("GET", "HTTPBIN_URL/no-location-redirect");`, + script: `var res = http.request("GET", "HTTPBIN_URL/no-location-redirect");`, }, { name: "Bad location redirect", expectedErrorCode: 1000, expectedErrorMsg: "failed to parse Location header \"h\\t:/\": ", - script: `let res = http.request("GET", "HTTPBIN_URL/bad-location-redirect");`, + script: `var res = http.request("GET", "HTTPBIN_URL/bad-location-redirect");`, }, { name: "Missing protocol", expectedErrorCode: 1000, expectedErrorMsg: `unsupported protocol scheme ""`, - script: `let res = http.request("GET", "dafsgdhfjg/");`, + script: 
`var res = http.request("GET", "dafsgdhfjg/");`, }, { name: "Too many redirects", status: 302, moreSamples: 2, script: ` - let res = http.get("HTTPBIN_URL/relative-redirect/3", {redirects: 2}); + var res = http.get("HTTPBIN_URL/relative-redirect/3", {redirects: 2}); if (res.url != "HTTPBIN_URL/relative-redirect/1") { throw new Error("incorrect URL: " + res.url) }`, }, { @@ -1733,7 +1667,7 @@ func TestErrorCodes(t *testing.T) { expectedErrorMsg: `dial: connection refused`, expectedErrorCode: 1212, script: ` - let res = http.get("HTTPBIN_URL/redirect-to?url=http%3A%2F%2F127.0.0.1%3A1%2Fpesho"); + var res = http.get("HTTPBIN_URL/redirect-to?url=http%3A%2F%2F127.0.0.1%3A1%2Fpesho"); if (res.url != "http://127.0.0.1:1/pesho") { throw new Error("incorrect URL: " + res.url) }`, }, } @@ -1793,7 +1727,7 @@ func TestResponseWaitingAndReceivingTimings(t *testing.T) { })) _, err := common.RunString(rt, tb.Replacer.Replace(` - let resp = http.get("HTTPBIN_URL/slow-response"); + var resp = http.get("HTTPBIN_URL/slow-response"); if (resp.timings.waiting < 1000) { throw new Error("expected waiting time to be over 1000ms but was " + resp.timings.waiting); @@ -1819,7 +1753,7 @@ func TestResponseTimingsWhenTimeout(t *testing.T) { state.Options.Throw = null.BoolFrom(false) _, err := common.RunString(rt, tb.Replacer.Replace(` - let resp = http.get("HTTPBIN_URL/delay/10", { timeout: 2500 }); + var resp = http.get("HTTPBIN_URL/delay/10", { timeout: 2500 }); if (resp.timings.waiting < 2000) { throw new Error("expected waiting time to be over 2000ms but was " + resp.timings.waiting); @@ -1841,18 +1775,18 @@ func TestNoResponseBodyMangling(t *testing.T) { state.Options.Throw = null.BoolFrom(true) _, err := common.RunString(rt, tb.Replacer.Replace(` - const batchSize = 100; + var batchSize = 100; - let requests = []; + var requests = []; - for (let i = 0; i < batchSize; i++) { + for (var i = 0; i < batchSize; i++) { requests.push(["GET", "HTTPBIN_URL/get?req=" + i, null, { responseType: (i % 
2 ? "binary" : "text") }]); } - let responses = http.batch(requests); + var responses = http.batch(requests); - for (let i = 0; i < batchSize; i++) { - let reqNumber = parseInt(responses[i].json().args.req[0], 10); + for (var i = 0; i < batchSize; i++) { + var reqNumber = parseInt(responses[i].json().args.req[0], 10); if (i !== reqNumber) { throw new Error("Response " + i + " has " + reqNumber + ", expected " + i) } @@ -1945,7 +1879,7 @@ func TestErrorsWithDecompression(t *testing.T) { _, err := common.RunString(rt, tb.Replacer.Replace(` function handleResponseEncodingError (encoding) { - let resp = http.get("HTTPBIN_URL/broken-archive?encoding=" + encoding); + var resp = http.get("HTTPBIN_URL/broken-archive?encoding=" + encoding); if (resp.error_code != 1701) { throw new Error("Expected error_code 1701 for '" + encoding +"', but got " + resp.error_code); } @@ -1977,7 +1911,7 @@ func TestDigestAuthWithBody(t *testing.T) { ) _, err := common.RunString(rt, fmt.Sprintf(` - let res = http.post(%q, "super secret body", { auth: "digest" }); + var res = http.post(%q, "super secret body", { auth: "digest" }); if (res.status !== 200) { throw new Error("wrong status: " + res.status); } if (res.error_code !== 0) { throw new Error("wrong error code: " + res.error_code); } `, urlWithCreds)) diff --git a/js/modules/k6/http/response.go b/js/modules/k6/http/response.go index 987347c94ee..8deb1b2462c 100644 --- a/js/modules/k6/http/response.go +++ b/js/modules/k6/http/response.go @@ -27,6 +27,7 @@ import ( "strings" "github.com/dop251/goja" + "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/js/modules/k6/html" "github.com/loadimpact/k6/lib/netext/httpext" diff --git a/js/modules/k6/http/response_test.go b/js/modules/k6/http/response_test.go index 4c77562df81..a8691c148cd 100644 --- a/js/modules/k6/http/response_test.go +++ b/js/modules/k6/http/response_test.go @@ -27,10 +27,11 @@ import ( "net/url" "testing" + "github.com/stretchr/testify/assert" + 
"github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/lib/netext/httpext" "github.com/loadimpact/k6/stats" - "github.com/stretchr/testify/assert" ) const testGetFormHTML = ` @@ -80,7 +81,7 @@ const jsonData = `{"glossary": { "GlossSeeAlso": ["GML","XML"]}, "GlossSee": "markup"}}}}}` -const invalidJSONData = `{ +const invalidJSONData = `{ "a":"apple", "t":testing" }` @@ -135,7 +136,7 @@ func TestResponse(t *testing.T) { t.Run("Html", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/html"); + var res = http.request("GET", "HTTPBIN_URL/html"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (res.body.indexOf("Herman Melville - Moby-Dick") == -1) { throw new Error("wrong body: " + res.body); } `)) @@ -168,11 +169,15 @@ func TestResponse(t *testing.T) { if assert.NoError(t, err) { old := state.Group state.Group = g - defer func() { state.Group = old }() + state.Tags["group"] = g.Path + defer func() { + state.Group = old + state.Tags["group"] = old.Path + }() } _, err = common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/html"); + var res = http.request("GET", "HTTPBIN_URL/html"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (res.body.indexOf("Herman Melville - Moby-Dick") == -1) { throw new Error("wrong body: " + res.body); } `)) @@ -182,7 +187,7 @@ func TestResponse(t *testing.T) { }) t.Run("Json", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/get?a=1&b=2"); + var res = http.request("GET", "HTTPBIN_URL/get?a=1&b=2"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } if (res.json().args.a != "1") { throw new Error("wrong ?a: " + res.json().args.a); } if (res.json().args.b != "2") { throw new Error("wrong ?b: " + res.json().args.b); } @@ -204,7 +209,7 @@ func TestResponse(t *testing.T) { }) t.Run("JsonSelector", func(t *testing.T) { _, err := 
common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/json"); + var res = http.request("GET", "HTTPBIN_URL/json"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } var value = res.json("glossary.friends.1") @@ -217,7 +222,7 @@ func TestResponse(t *testing.T) { if (value != undefined) { throw new Error("Expected undefined, but got: " + value); } - value = res.json("glossary.null") + value = res.json("glossary.null") if (value != null) { throw new Error("Expected null, but got: " + value); } @@ -233,8 +238,8 @@ func TestResponse(t *testing.T) { if (value != true) { throw new Error("Expected boolean true, but got: " + value); } - value = res.json("glossary.GlossDiv.GlossList.GlossEntry.GlossDef.title") - if (value != "example glossary") + value = res.json("glossary.GlossDiv.GlossList.GlossEntry.GlossDef.title") + if (value != "example glossary") { throw new Error("Expected 'example glossary'', but got: " + value); } value = res.json("glossary.friends.#.first")[0] @@ -248,11 +253,11 @@ func TestResponse(t *testing.T) { t.Run("SubmitForm", func(t *testing.T) { t.Run("withoutArgs", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/forms/post"); + var res = http.request("GET", "HTTPBIN_URL/forms/post"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } res = res.submitForm() if (res.status != 200) { throw new Error("wrong status: " + res.status); } - let data = res.json().form + var data = res.json().form if (data.custname[0] !== "" || data.extradata !== undefined || data.comments[0] !== "" || @@ -267,11 +272,11 @@ func TestResponse(t *testing.T) { t.Run("withFields", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/forms/post"); + var res = http.request("GET", "HTTPBIN_URL/forms/post"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } res = res.submitForm({ fields: { 
custname: "test", extradata: "test2" } }) if (res.status != 200) { throw new Error("wrong status: " + res.status); } - let data = res.json().form + var data = res.json().form if (data.custname[0] !== "test" || data.extradata[0] !== "test2" || data.comments[0] !== "" || @@ -286,11 +291,11 @@ func TestResponse(t *testing.T) { t.Run("withRequestParams", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/forms/post"); + var res = http.request("GET", "HTTPBIN_URL/forms/post"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } res = res.submitForm({ params: { headers: { "My-Fancy-Header": "SomeValue" } }}) if (res.status != 200) { throw new Error("wrong status: " + res.status); } - let headers = res.json().headers + var headers = res.json().headers if (headers["My-Fancy-Header"][0] !== "SomeValue" ) { throw new Error("incorrect headers: " + JSON.stringify(headers)); } `)) assert.NoError(t, err) @@ -299,11 +304,11 @@ func TestResponse(t *testing.T) { t.Run("withFormSelector", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/forms/post"); + var res = http.request("GET", "HTTPBIN_URL/forms/post"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } res = res.submitForm({ formSelector: 'form[method="post"]' }) if (res.status != 200) { throw new Error("wrong status: " + res.status); } - let data = res.json().form + var data = res.json().form if (data.custname[0] !== "" || data.extradata !== undefined || data.comments[0] !== "" || @@ -318,7 +323,7 @@ func TestResponse(t *testing.T) { t.Run("withNonExistentForm", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/forms/post"); + var res = http.request("GET", "HTTPBIN_URL/forms/post"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } res.submitForm({ formSelector: "#doesNotExist" }) `)) @@ -327,11 
+332,11 @@ func TestResponse(t *testing.T) { t.Run("withGetMethod", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/myforms/get"); + var res = http.request("GET", "HTTPBIN_URL/myforms/get"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } res = res.submitForm() if (res.status != 200) { throw new Error("wrong status: " + res.status); } - let data = res.json().query + var data = res.json().query if (data.input_with_value[0] !== "value" || data.input_without_value[0] !== "" || data.select_one[0] !== "yes this option" || @@ -347,7 +352,7 @@ func TestResponse(t *testing.T) { t.Run("ClickLink", func(t *testing.T) { t.Run("withoutArgs", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/links/10/0"); + var res = http.request("GET", "HTTPBIN_URL/links/10/0"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } res = res.clickLink() if (res.status != 200) { throw new Error("wrong status: " + res.status); } @@ -358,7 +363,7 @@ func TestResponse(t *testing.T) { t.Run("withSelector", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/links/10/0"); + var res = http.request("GET", "HTTPBIN_URL/links/10/0"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } res = res.clickLink({ selector: 'a:nth-child(4)' }) if (res.status != 200) { throw new Error("wrong status: " + res.status); } @@ -369,7 +374,7 @@ func TestResponse(t *testing.T) { t.Run("withNonExistentLink", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL/links/10/0"); + var res = http.request("GET", "HTTPBIN_URL/links/10/0"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } res = res.clickLink({ selector: 'a#doesNotExist' }) `)) @@ -378,11 +383,11 @@ func TestResponse(t *testing.T) { t.Run("withRequestParams", func(t 
*testing.T) { _, err := common.RunString(rt, sr(` - let res = http.request("GET", "HTTPBIN_URL"); + var res = http.request("GET", "HTTPBIN_URL"); if (res.status != 200) { throw new Error("wrong status: " + res.status); } res = res.clickLink({ selector: 'a[href="/get"]', params: { headers: { "My-Fancy-Header": "SomeValue" } } }) if (res.status != 200) { throw new Error("wrong status: " + res.status); } - let headers = res.json().headers + var headers = res.json().headers if (headers["My-Fancy-Header"][0] !== "SomeValue" ) { throw new Error("incorrect headers: " + JSON.stringify(headers)); } `)) assert.NoError(t, err) @@ -392,6 +397,7 @@ func TestResponse(t *testing.T) { } func BenchmarkResponseJson(b *testing.B) { + b.Skipf("We need to have context in the response") testCases := []struct { selector string }{ @@ -405,6 +411,7 @@ func BenchmarkResponseJson(b *testing.B) { {"glossary"}, } for _, tc := range testCases { + tc := tc b.Run(fmt.Sprintf("Selector %s ", tc.selector), func(b *testing.B) { for n := 0; n < b.N; n++ { resp := responseFromHttpext(&httpext.Response{Body: jsonData}) diff --git a/js/modules/k6/http/tls_test.go b/js/modules/k6/http/tls_test.go index a23ba69f741..85d3f736135 100644 --- a/js/modules/k6/http/tls_test.go +++ b/js/modules/k6/http/tls_test.go @@ -27,10 +27,11 @@ import ( "net/http" "testing" + "github.com/stretchr/testify/assert" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/lib" - "github.com/stretchr/testify/assert" - null "gopkg.in/guregu/null.v3" ) func TestTLS13Support(t *testing.T) { @@ -47,7 +48,7 @@ func TestTLS13Support(t *testing.T) { state.Options.Apply(lib.Options{TLSVersion: &lib.TLSVersions{Max: lib.TLSVersion13}}) _, err := common.RunString(rt, tb.Replacer.Replace(` - let resp = http.get("HTTPSBIN_URL/tls-version"); + var resp = http.get("HTTPSBIN_URL/tls-version"); if (resp.body != "tls1.3") { throw new Error("unexpected tls version: " + resp.body); } diff --git 
a/js/modules/k6/k6.go b/js/modules/k6/k6.go index 084ece55915..7bbfddd5e94 100644 --- a/js/modules/k6/k6.go +++ b/js/modules/k6/k6.go @@ -23,16 +23,16 @@ package k6 import ( "context" "math/rand" - "strconv" "sync/atomic" "time" "github.com/dop251/goja" + "github.com/pkg/errors" + "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/metrics" "github.com/loadimpact/k6/stats" - "github.com/pkg/errors" ) type K6 struct{} @@ -84,23 +84,23 @@ func (*K6) Group(ctx context.Context, name string, fn goja.Callable) (goja.Value old := state.Group state.Group = g - defer func() { state.Group = old }() + + shouldUpdateTag := state.Options.SystemTags.Has(stats.TagGroup) + if shouldUpdateTag { + state.Tags["group"] = g.Path + } + defer func() { + state.Group = old + if shouldUpdateTag { + state.Tags["group"] = old.Path + } + }() startTime := time.Now() ret, err := fn(goja.Undefined()) t := time.Now() - tags := state.Options.RunTags.CloneTags() - if state.Options.SystemTags.Has(stats.TagGroup) { - tags["group"] = g.Path - } - if state.Options.SystemTags.Has(stats.TagVU) { - tags["vu"] = strconv.FormatInt(state.Vu, 10) - } - if state.Options.SystemTags.Has(stats.TagIter) { - tags["iter"] = strconv.FormatInt(state.Iteration, 10) - } - + tags := state.CloneTags() stats.PushIfNotDone(ctx, state.Samples, stats.Sample{ Time: t, Metric: metrics.GroupDuration, @@ -119,23 +119,14 @@ func (*K6) Check(ctx context.Context, arg0, checks goja.Value, extras ...goja.Va rt := common.GetRuntime(ctx) t := time.Now() - // Prepare tags, make sure the `group` tag can't be overwritten. 
- commonTags := state.Options.RunTags.CloneTags() - if state.Options.SystemTags.Has(stats.TagGroup) { - commonTags["group"] = state.Group.Path - } + // Prepare the metric tags + commonTags := state.CloneTags() if len(extras) > 0 { obj := extras[0].ToObject(rt) for _, k := range obj.Keys() { commonTags[k] = obj.Get(k).String() } } - if state.Options.SystemTags.Has(stats.TagVU) { - commonTags["vu"] = strconv.FormatInt(state.Vu, 10) - } - if state.Options.SystemTags.Has(stats.TagIter) { - commonTags["iter"] = strconv.FormatInt(state.Iteration, 10) - } succ := true var exc error diff --git a/js/modules/k6/k6_test.go b/js/modules/k6/k6_test.go index 6dbe4c1ef95..92194e77365 100644 --- a/js/modules/k6/k6_test.go +++ b/js/modules/k6/k6_test.go @@ -27,14 +27,14 @@ import ( "testing" "time" - "github.com/loadimpact/k6/stats" - "github.com/dop251/goja" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/metrics" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/loadimpact/k6/stats" ) func TestFail(t *testing.T) { @@ -94,14 +94,14 @@ func TestRandSeed(t *testing.T) { rand := 0.8487305991992138 _, err := common.RunString(rt, fmt.Sprintf(` - let rnd = Math.random(); + var rnd = Math.random(); if (rnd == %.16f) { throw new Error("wrong random: " + rnd); } `, rand)) assert.NoError(t, err) _, err = common.RunString(rt, fmt.Sprintf(` k6.randomSeed(12345) - let rnd = Math.random(); + var rnd = Math.random(); if (rnd != %.16f) { throw new Error("wrong random: " + rnd); } `, rand)) assert.NoError(t, err) @@ -155,6 +155,7 @@ func TestCheck(t *testing.T) { SystemTags: &stats.DefaultSystemTagSet, }, Samples: samples, + Tags: map[string]string{"group": root.Path}, }, samples } t.Run("Object", func(t *testing.T) { @@ -255,7 +256,7 @@ func TestCheck(t *testing.T) { "b": function() { throw new Error("error B") }, }) `) - 
assert.EqualError(t, err, "Error: error A at a (:3:27(6))") + assert.EqualError(t, err, "Error: error A at :3:28(4)") bufSamples := stats.GetBufferedSamples(samples) if assert.Len(t, bufSamples, 1) { @@ -275,8 +276,8 @@ func TestCheck(t *testing.T) { t.Run("Types", func(t *testing.T) { templates := map[string]string{ "Literal": `k6.check(null,{"check": %s})`, - "Callable": `k6.check(null,{"check": ()=>%s})`, - "Callable/Arg": `k6.check(%s,{"check":(v)=>v})`, + "Callable": `k6.check(null,{"check": function() { return %s; }})`, + "Callable/Arg": `k6.check(%s,{"check": function(v) {return v; }})`, } testdata := map[string]bool{ `0`: false, diff --git a/js/modules/k6/marshalling_test.go b/js/modules/k6/marshalling_test.go index 0b05f070a97..8a55527ba7f 100644 --- a/js/modules/k6/marshalling_test.go +++ b/js/modules/k6/marshalling_test.go @@ -26,14 +26,15 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/loadimpact/k6/js" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/testutils/httpmultibin" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/loader" "github.com/loadimpact/k6/stats" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestSetupDataMarshalling(t *testing.T) { @@ -129,14 +130,17 @@ func TestSetupDataMarshalling(t *testing.T) { require.NoError(t, err) - samples := make(chan stats.SampleContainer, 100) + samples := make(chan<- stats.SampleContainer, 100) if !assert.NoError(t, runner.Setup(context.Background(), samples)) { return } - vu, err := runner.NewVU(samples) + initVU, err := runner.NewVU(1, samples) if assert.NoError(t, err) { - err := vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err := vu.RunOnce() assert.NoError(t, err) } } diff --git a/js/modules/k6/metrics/metrics.go 
b/js/modules/k6/metrics/metrics.go index 4de9651f5e8..437b9bc0189 100644 --- a/js/modules/k6/metrics/metrics.go +++ b/js/modules/k6/metrics/metrics.go @@ -28,6 +28,7 @@ import ( "time" "github.com/dop251/goja" + "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/stats" @@ -73,11 +74,7 @@ func (m Metric) Add(ctx context.Context, v goja.Value, addTags ...map[string]str return false, ErrMetricsAddInInitContext } - tags := state.Options.RunTags.CloneTags() - if state.Options.SystemTags.Has(stats.TagGroup) { - tags["group"] = state.Group.Path - } - + tags := state.CloneTags() for _, ts := range addTags { for k, v := range ts { tags[k] = v diff --git a/js/modules/k6/metrics/metrics_test.go b/js/modules/k6/metrics/metrics_test.go index e99b104cbf7..c3fe9a70d5e 100644 --- a/js/modules/k6/metrics/metrics_test.go +++ b/js/modules/k6/metrics/metrics_test.go @@ -26,11 +26,12 @@ import ( "testing" "github.com/dop251/goja" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/stats" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestMetrics(t *testing.T) { @@ -72,6 +73,7 @@ func TestMetrics(t *testing.T) { Options: lib.Options{SystemTags: stats.NewSystemTagSet(stats.TagGroup)}, Group: root, Samples: samples, + Tags: map[string]string{"group": root.Path}, } isTimeString := "" @@ -79,7 +81,7 @@ func TestMetrics(t *testing.T) { isTimeString = `, true` } _, err := common.RunString(rt, - fmt.Sprintf(`let m = new metrics.%s("my_metric"%s)`, fn, isTimeString), + fmt.Sprintf(`var m = new metrics.%s("my_metric"%s)`, fn, isTimeString), ) if !assert.NoError(t, err) { return @@ -96,8 +98,10 @@ func TestMetrics(t *testing.T) { "Child": child, } for name, g := range groups { + name, g := name, g t.Run(name, func(t *testing.T) { state.Group = g + state.Tags["group"] = g.Path for name, val := 
range values { t.Run(name, func(t *testing.T) { t.Run("Simple", func(t *testing.T) { diff --git a/js/modules/k6/ws/ws.go b/js/modules/k6/ws/ws.go index 1302126cd98..55a973ac18d 100644 --- a/js/modules/k6/ws/ws.go +++ b/js/modules/k6/ws/ws.go @@ -34,6 +34,7 @@ import ( "github.com/dop251/goja" "github.com/gorilla/websocket" + "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/metrics" @@ -103,7 +104,7 @@ func (*WS) Connect(ctx context.Context, url string, args ...goja.Value) (*WSHTTP // Leave header to nil by default so we can pass it directly to the Dialer var header http.Header - tags := state.Options.RunTags.CloneTags() + tags := state.CloneTags() // Parse the optional second argument (params) if !goja.IsUndefined(paramsV) && !goja.IsNull(paramsV) { @@ -143,9 +144,6 @@ func (*WS) Connect(ctx context.Context, url string, args ...goja.Value) (*WSHTTP if state.Options.SystemTags.Has(stats.TagURL) { tags["url"] = url } - if state.Options.SystemTags.Has(stats.TagGroup) { - tags["group"] = state.Group.Path - } // Pass a custom net.Dial function to websocket.Dialer that will substitute // the underlying net.Conn with our own tracked netext.Conn @@ -249,7 +247,7 @@ func (*WS) Connect(ctx context.Context, url string, args ...goja.Value) (*WSHTTP readErrChan := make(chan error) // Wraps a couple of channels around conn.ReadMessage - go readPump(conn, readDataChan, readErrChan, readCloseChan) + go socket.readPump(readDataChan, readErrChan, readCloseChan) // This is the main control loop. 
All JS code (including error handlers) // should only be executed by this thread to avoid race conditions @@ -386,7 +384,11 @@ func (s *Socket) SetTimeout(fn goja.Callable, timeoutMs int) { go func() { select { case <-time.After(time.Duration(timeoutMs) * time.Millisecond): - s.scheduled <- fn + select { + case s.scheduled <- fn: + case <-s.done: + return + } case <-s.done: return @@ -404,7 +406,11 @@ func (s *Socket) SetInterval(fn goja.Callable, intervalMs int) { for { select { case <-ticker.C: - s.scheduled <- fn + select { + case s.scheduled <- fn: + case <-s.done: + return + } case <-s.done: return @@ -452,24 +458,35 @@ func (s *Socket) closeConnection(code int) error { } // Wraps conn.ReadMessage in a channel -func readPump(conn *websocket.Conn, readChan chan []byte, errorChan chan error, closeChan chan int) { +func (s *Socket) readPump(readChan chan []byte, errorChan chan error, closeChan chan int) { for { - _, message, err := conn.ReadMessage() + _, message, err := s.conn.ReadMessage() if err != nil { if websocket.IsUnexpectedCloseError( err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { // Report an unexpected closure - errorChan <- err + select { + case errorChan <- err: + case <-s.done: + return + } } code := websocket.CloseGoingAway if e, ok := err.(*websocket.CloseError); ok { code = e.Code } - closeChan <- code + select { + case closeChan <- code: + case <-s.done: + } return } - readChan <- message + select { + case readChan <- message: + case <-s.done: + return + } } } diff --git a/js/modules/k6/ws/ws_test.go b/js/modules/k6/ws/ws_test.go index f0090a831ba..174d270dc7f 100644 --- a/js/modules/k6/ws/ws_test.go +++ b/js/modules/k6/ws/ws_test.go @@ -31,12 +31,13 @@ import ( "github.com/dop251/goja" "github.com/gorilla/websocket" + "github.com/stretchr/testify/assert" + "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/metrics" "github.com/loadimpact/k6/lib/testutils/httpmultibin" 
"github.com/loadimpact/k6/stats" - "github.com/stretchr/testify/assert" ) func assertSessionMetricsEmitted(t *testing.T, sampleContainers []stats.SampleContainer, subprotocol, url string, status int, group string) { @@ -121,7 +122,7 @@ func TestSession(t *testing.T) { t.Run("connect_ws", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = ws.connect("WSBIN_URL/ws-echo", function(socket){ + var res = ws.connect("WSBIN_URL/ws-echo", function(socket){ socket.close() }); if (res.status != 101) { throw new Error("connection failed with status: " + res.status); } @@ -132,7 +133,7 @@ func TestSession(t *testing.T) { t.Run("connect_wss", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = ws.connect("WSSBIN_URL/ws-echo", function(socket){ + var res = ws.connect("WSSBIN_URL/ws-echo", function(socket){ socket.close() }); if (res.status != 101) { throw new Error("TLS connection failed with status: " + res.status); } @@ -143,8 +144,8 @@ func TestSession(t *testing.T) { t.Run("open", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let opened = false; - let res = ws.connect("WSBIN_URL/ws-echo", function(socket){ + var opened = false; + var res = ws.connect("WSBIN_URL/ws-echo", function(socket){ socket.on("open", function() { opened = true; socket.close() @@ -158,7 +159,7 @@ func TestSession(t *testing.T) { t.Run("send_receive", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = ws.connect("WSBIN_URL/ws-echo", function(socket){ + var res = ws.connect("WSBIN_URL/ws-echo", function(socket){ socket.on("open", function() { socket.send("test") }) @@ -180,8 +181,8 @@ func TestSession(t *testing.T) { t.Run("interval", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let counter = 0; - let res = ws.connect("WSBIN_URL/ws-echo", function(socket){ + var counter = 0; + var res = ws.connect("WSBIN_URL/ws-echo", function(socket){ socket.setInterval(function () { counter += 1; if (counter > 2) { socket.close(); } @@ -195,9 
+196,9 @@ func TestSession(t *testing.T) { t.Run("timeout", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let start = new Date().getTime(); - let ellapsed = new Date().getTime() - start; - let res = ws.connect("WSBIN_URL/ws-echo", function(socket){ + var start = new Date().getTime(); + var ellapsed = new Date().getTime() - start; + var res = ws.connect("WSBIN_URL/ws-echo", function(socket){ socket.setTimeout(function () { ellapsed = new Date().getTime() - start; socket.close(); @@ -213,8 +214,8 @@ func TestSession(t *testing.T) { t.Run("ping", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let pongReceived = false; - let res = ws.connect("WSBIN_URL/ws-echo", function(socket){ + var pongReceived = false; + var res = ws.connect("WSBIN_URL/ws-echo", function(socket){ socket.on("open", function(data) { socket.ping(); }); @@ -237,10 +238,10 @@ func TestSession(t *testing.T) { t.Run("multiple_handlers", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let pongReceived = false; - let otherPongReceived = false; + var pongReceived = false; + var otherPongReceived = false; - let res = ws.connect("WSBIN_URL/ws-echo", function(socket){ + var res = ws.connect("WSBIN_URL/ws-echo", function(socket){ socket.on("open", function(data) { socket.ping(); }); @@ -271,8 +272,8 @@ func TestSession(t *testing.T) { t.Run("client_close", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let closed = false; - let res = ws.connect("WSBIN_URL/ws-echo", function(socket){ + var closed = false; + var res = ws.connect("WSBIN_URL/ws-echo", function(socket){ socket.on("open", function() { socket.close() }) @@ -301,8 +302,8 @@ func TestSession(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { _, err := common.RunString(rt, sr(fmt.Sprintf(` - let closed = false; - let res = ws.connect("WSBIN_URL%s", function(socket){ + var closed = false; + var res = ws.connect("WSBIN_URL%s", function(socket){ socket.on("open", function() { socket.send("test"); }) 
@@ -346,7 +347,7 @@ func TestErrors(t *testing.T) { t.Run("invalid_url", func(t *testing.T) { _, err := common.RunString(rt, ` - let res = ws.connect("INVALID", function(socket){ + var res = ws.connect("INVALID", function(socket){ socket.on("open", function() { socket.close(); }); @@ -358,7 +359,7 @@ func TestErrors(t *testing.T) { t.Run("invalid_url_message_panic", func(t *testing.T) { // Attempting to send a message to a non-existent socket shouldn't panic _, err := common.RunString(rt, ` - let res = ws.connect("INVALID", function(socket){ + var res = ws.connect("INVALID", function(socket){ socket.send("new message"); }); `) @@ -367,7 +368,7 @@ func TestErrors(t *testing.T) { t.Run("error_in_setup", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let res = ws.connect("WSBIN_URL/ws-echo-invalid", function(socket){ + var res = ws.connect("WSBIN_URL/ws-echo-invalid", function(socket){ throw new Error("error in setup"); }); `)) @@ -376,8 +377,8 @@ func TestErrors(t *testing.T) { t.Run("send_after_close", func(t *testing.T) { _, err := common.RunString(rt, sr(` - let hasError = false; - let res = ws.connect("WSBIN_URL/ws-echo-invalid", function(socket){ + var hasError = false; + var res = ws.connect("WSBIN_URL/ws-echo-invalid", function(socket){ socket.on("open", function() { socket.close(); socket.send("test"); @@ -398,7 +399,7 @@ func TestErrors(t *testing.T) { t.Run("error on close", func(t *testing.T) { _, err := common.RunString(rt, sr(` var closed = false; - let res = ws.connect("WSBIN_URL/ws-close", function(socket){ + var res = ws.connect("WSBIN_URL/ws-close", function(socket){ socket.on('open', function open() { socket.setInterval(function timeout() { socket.ping(); @@ -461,7 +462,7 @@ func TestSystemTags(t *testing.T) { t.Run("only "+expectedTag, func(t *testing.T) { state.Options.SystemTags = stats.ToSystemTagSet([]string{expectedTag}) _, err := common.RunString(rt, sr(` - let res = ws.connect("WSBIN_URL/ws-echo", function(socket){ + var res = 
ws.connect("WSBIN_URL/ws-echo", function(socket){ socket.on("open", function() { socket.send("test") }) @@ -525,7 +526,7 @@ func TestTLSConfig(t *testing.T) { } _, err := common.RunString(rt, sr(` - let res = ws.connect("WSSBIN_URL/ws-close", function(socket){ + var res = ws.connect("WSSBIN_URL/ws-close", function(socket){ socket.close() }); if (res.status != 101) { throw new Error("TLS connection failed with status: " + res.status); } @@ -538,7 +539,7 @@ func TestTLSConfig(t *testing.T) { state.TLSConfig = tb.TLSClientConfig _, err := common.RunString(rt, sr(` - let res = ws.connect("WSSBIN_URL/ws-close", function(socket){ + var res = ws.connect("WSSBIN_URL/ws-close", function(socket){ socket.close() }); if (res.status != 101) { @@ -580,7 +581,8 @@ func TestReadPump(t *testing.T) { msgChan := make(chan []byte) errChan := make(chan error) closeChan := make(chan int) - go readPump(conn, msgChan, errChan, closeChan) + s := &Socket{conn: conn} + go s.readPump(msgChan, errChan, closeChan) readChans: for { diff --git a/js/runner.go b/js/runner.go index 03da8c07bd8..a25ce877c69 100644 --- a/js/runner.go +++ b/js/runner.go @@ -24,11 +24,11 @@ import ( "context" "crypto/tls" "encoding/json" + "fmt" "net" "net/http" "net/http/cookiejar" "strconv" - "sync" "time" "github.com/dop251/goja" @@ -42,17 +42,14 @@ import ( "github.com/loadimpact/k6/js/common" "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/consts" "github.com/loadimpact/k6/lib/netext" "github.com/loadimpact/k6/loader" "github.com/loadimpact/k6/stats" ) //nolint:gochecknoglobals -var ( - errInterrupt = errors.New("context cancelled") - stageSetup = "setup" - stageTeardown = "teardown" -) +var errInterrupt = errors.New("context cancelled") // Ensure Runner implements the lib.Runner interface var _ lib.Runner = &Runner{} @@ -114,15 +111,17 @@ func (r *Runner) MakeArchive() *lib.Archive { return r.Bundle.makeArchive() } -func (r *Runner) NewVU(samplesOut chan<- stats.SampleContainer) (lib.VU, error) { - 
vu, err := r.newVU(samplesOut) +// NewVU returns a new initialized VU. +func (r *Runner) NewVU(id int64, samplesOut chan<- stats.SampleContainer) (lib.InitializedVU, error) { + vu, err := r.newVU(id, samplesOut) if err != nil { return nil, err } - return lib.VU(vu), nil + return lib.InitializedVU(vu), nil } -func (r *Runner) newVU(samplesOut chan<- stats.SampleContainer) (*VU, error) { +// nolint:funlen +func (r *Runner) newVU(id int64, samplesOut chan<- stats.SampleContainer) (*VU, error) { // Instantiate a new bundle, make a VU out of it. bi, err := r.Bundle.Instantiate() if err != nil { @@ -185,6 +184,7 @@ func (r *Runner) newVU(samplesOut chan<- stats.SampleContainer) (*VU, error) { } vu := &VU{ + ID: id, BundleInstance: *bi, Runner: r, Transport: transport, @@ -194,23 +194,34 @@ func (r *Runner) newVU(samplesOut chan<- stats.SampleContainer) (*VU, error) { Console: r.console, BPool: bpool.NewBufferPool(100), Samples: samplesOut, - m: &sync.Mutex{}, } + + vu.state = &lib.State{ + Logger: vu.Runner.Logger, + Options: vu.Runner.Bundle.Options, + Transport: vu.Transport, + Dialer: vu.Dialer, + TLSConfig: vu.TLSConfig, + CookieJar: cookieJar, + RPSLimit: vu.Runner.RPSLimit, + BPool: vu.BPool, + Vu: vu.ID, + Samples: vu.Samples, + Iteration: vu.Iteration, + Tags: vu.Runner.Bundle.Options.RunTags.CloneTags(), + Group: r.defaultGroup, + } + vu.Runtime.Set("__VU", vu.ID) vu.Runtime.Set("console", common.Bind(vu.Runtime, vu.Console, vu.Context)) + + // This is here mostly so if someone tries they get a nice message + // instead of "Value is not an object: undefined ..." common.BindToGlobal(vu.Runtime, map[string]interface{}{ "open": func() { - common.Throw(vu.Runtime, errors.New( - `The "open()" function is only available to init code (aka the global scope), see `+ - ` https://k6.io/docs/using-k6/test-life-cycle for more information`, - )) + common.Throw(vu.Runtime, errors.New(openCantBeUsedOutsideInitContextMsg)) }, }) - // Give the VU an initial sense of identity. 
- if err := vu.Reconfigure(0); err != nil { - return nil, err - } - return vu, nil } @@ -221,9 +232,9 @@ func (r *Runner) Setup(ctx context.Context, out chan<- stats.SampleContainer) er ) defer setupCancel() - v, err := r.runPart(setupCtx, out, stageSetup, nil) + v, err := r.runPart(setupCtx, out, consts.SetupFn, nil) if err != nil { - return errors.Wrap(err, stageSetup) + return err } // r.setupData = nil is special it means undefined from this moment forward if goja.IsUndefined(v) { @@ -233,7 +244,7 @@ func (r *Runner) Setup(ctx context.Context, out chan<- stats.SampleContainer) er r.setupData, err = json.Marshal(v.Export()) if err != nil { - return errors.Wrap(err, stageSetup) + return errors.Wrap(err, consts.SetupFn) } var tmp interface{} return json.Unmarshal(r.setupData, &tmp) @@ -259,12 +270,12 @@ func (r *Runner) Teardown(ctx context.Context, out chan<- stats.SampleContainer) var data interface{} if r.setupData != nil { if err := json.Unmarshal(r.setupData, &data); err != nil { - return errors.Wrap(err, stageTeardown) + return errors.Wrap(err, consts.TeardownFn) } } else { data = goja.Undefined() } - _, err := r.runPart(teardownCtx, out, stageTeardown, data) + _, err := r.runPart(teardownCtx, out, consts.TeardownFn, data) return err } @@ -276,6 +287,13 @@ func (r *Runner) GetOptions() lib.Options { return r.Bundle.Options } +// IsExecutable returns whether the given name is an exported and +// executable function in the script. 
+func (r *Runner) IsExecutable(name string) bool { + _, exists := r.Bundle.exports[name] + return exists +} + func (r *Runner) SetOptions(opts lib.Options) error { r.Bundle.Options = opts @@ -284,6 +302,8 @@ func (r *Runner) SetOptions(opts lib.Options) error { r.RPSLimit = rate.NewLimiter(rate.Limit(rps.Int64), 1) } + // TODO: validate that all exec values are either nil or valid exported methods (or HTTP requests in the future) + if opts.ConsoleOutput.Valid { c, err := newFileConsole(opts.ConsoleOutput.String) if err != nil { @@ -299,7 +319,7 @@ func (r *Runner) SetOptions(opts lib.Options) error { // Runs an exported function in its own temporary VU, optionally with an argument. Execution is // interrupted if the context expires. No error is returned if the part does not exist. func (r *Runner) runPart(ctx context.Context, out chan<- stats.SampleContainer, name string, arg interface{}) (goja.Value, error) { - vu, err := r.newVU(out) + vu, err := r.newVU(0, out) if err != nil { return goja.Undefined(), err } @@ -312,24 +332,33 @@ func (r *Runner) runPart(ctx context.Context, out chan<- stats.SampleContainer, return goja.Undefined(), nil } + ctx = common.WithRuntime(ctx, vu.Runtime) + ctx = lib.WithState(ctx, vu.state) ctx, cancel := context.WithCancel(ctx) defer cancel() go func() { <-ctx.Done() vu.Runtime.Interrupt(errInterrupt) }() + *vu.Context = ctx group, err := lib.NewGroup(name, r.GetDefaultGroup()) if err != nil { return goja.Undefined(), err } - v, _, _, err := vu.runFn(ctx, group, false, fn, vu.Runtime.ToValue(arg)) + if r.Bundle.Options.SystemTags.Has(stats.TagGroup) { + vu.state.Tags["group"] = group.Path + } + vu.state.Group = group + + v, _, _, err := vu.runFn(ctx, false, fn, vu.Runtime.ToValue(arg)) // deadline is reached so we have timeouted but this might've not been registered correctly if deadline, ok := ctx.Deadline(); ok && time.Now().After(deadline) { // we could have an error that is not errInterrupt in which case we should return it 
instead if err, ok := err.(*goja.InterruptedError); ok && v != nil && err.Value() != errInterrupt { + // TODO: silence this error? return v, err } // otherwise we have timeouted @@ -342,9 +371,9 @@ func (r *Runner) runPart(ctx context.Context, out chan<- stats.SampleContainer, func (r *Runner) timeoutErrorDuration(stage string) time.Duration { d := time.Duration(0) switch stage { - case stageSetup: + case consts.SetupFn: return time.Duration(r.Bundle.Options.SetupTimeout.Duration) - case stageTeardown: + case consts.TeardownFn: return time.Duration(r.Bundle.Options.TeardownTimeout.Duration) } return d @@ -368,49 +397,102 @@ type VU struct { setupData goja.Value - // A VU will track the last context it was called with for cancellation. - // Note that interruptTrackedCtx is the context that is currently being tracked, while - // interruptCancel cancels an unrelated context that terminates the tracking goroutine - // without triggering an interrupt (for if the context changes). - // There are cleaner ways of handling the interruption problem, but this is a hot path that - // needs to be called thousands of times per second, which rules out anything that spawns a - // goroutine per call. - interruptTrackedCtx context.Context - interruptCancel context.CancelFunc - - m *sync.Mutex + state *lib.State } -// Verify that VU implements lib.VU -var _ lib.VU = &VU{} +// Verify that interfaces are implemented +var ( + _ lib.ActiveVU = &ActiveVU{} + _ lib.InitializedVU = &VU{} +) -func (u *VU) Reconfigure(id int64) error { - u.ID = id - u.Iteration = 0 - u.Runtime.Set("__VU", u.ID) - return nil +// ActiveVU holds a VU and its activation parameters +type ActiveVU struct { + *VU + *lib.VUActivationParams + busy chan struct{} } -func (u *VU) RunOnce(ctx context.Context) error { - u.m.Lock() - defer u.m.Unlock() - // Track the context and interrupt JS execution if it's cancelled. 
- if u.interruptTrackedCtx != ctx { - interCtx, interCancel := context.WithCancel(context.Background()) - if u.interruptCancel != nil { - u.interruptCancel() +// GetID returns the unique VU ID. +func (u *VU) GetID() int64 { + return u.ID +} + +// Activate the VU so it will be able to run code. +func (u *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { + u.Runtime.ClearInterrupt() + + if params.Exec == "" { + params.Exec = consts.DefaultFn + } + + // Override the preset global env with any custom env vars + env := make(map[string]string, len(u.env)+len(params.Env)) + for key, value := range u.env { + env[key] = value + } + for key, value := range params.Env { + env[key] = value + } + u.Runtime.Set("__ENV", env) + + opts := u.Runner.Bundle.Options + // TODO: maybe we can cache the original tags only clone them and add (if any) new tags on top ? + u.state.Tags = opts.RunTags.CloneTags() + for k, v := range params.Tags { + u.state.Tags[k] = v + } + if opts.SystemTags.Has(stats.TagVU) { + u.state.Tags["vu"] = strconv.FormatInt(u.ID, 10) + } + if opts.SystemTags.Has(stats.TagIter) { + u.state.Tags["iter"] = strconv.FormatInt(u.Iteration, 10) + } + if opts.SystemTags.Has(stats.TagGroup) { + u.state.Tags["group"] = u.state.Group.Path + } + if opts.SystemTags.Has(stats.TagScenario) { + u.state.Tags["scenario"] = params.Scenario + } + + params.RunContext = common.WithRuntime(params.RunContext, u.Runtime) + params.RunContext = lib.WithState(params.RunContext, u.state) + *u.Context = params.RunContext + + avu := &ActiveVU{ + VU: u, + VUActivationParams: params, + busy: make(chan struct{}, 1), + } + + go func() { + // Wait for the run context to be over + <-params.RunContext.Done() + // Interrupt the JS runtime + u.Runtime.Interrupt(errInterrupt) + // Wait for the VU to stop running, if it was, and prevent it from + // running again for this activation + avu.busy <- struct{}{} + + if params.DeactivateCallback != nil { + params.DeactivateCallback(u) } - 
u.interruptCancel = interCancel - u.interruptTrackedCtx = ctx - defer interCancel() - go func() { - select { - case <-interCtx.Done(): - case <-ctx.Done(): - u.Runtime.Interrupt(errInterrupt) - } - }() + }() + + return avu +} + +// RunOnce runs the configured Exec function once. +func (u *ActiveVU) RunOnce() error { + select { + case <-u.RunContext.Done(): + return u.RunContext.Err() // we are done, return + case u.busy <- struct{}{}: + // nothing else can run now, and the VU cannot be deactivated } + defer func() { + <-u.busy // unlock deactivation again + }() // Unmarshall the setupData only the first time for each VU so that VUs are isolated but we // still don't use too much CPU in the middle test @@ -426,8 +508,14 @@ func (u *VU) RunOnce(ctx context.Context) error { } } - // Call the default function. - _, isFullIteration, totalTime, err := u.runFn(ctx, u.Runner.defaultGroup, true, u.Default, u.setupData) + fn, ok := u.exports[u.Exec] + if !ok { + // Shouldn't happen; this is validated in cmd.validateScenarioConfig() + panic(fmt.Sprintf("function '%s' not found in exports", u.Exec)) + } + + // Call the exported function. 
+ _, isFullIteration, totalTime, err := u.runFn(u.RunContext, true, fn, u.setupData) // If MinIterationDuration is specified and the iteration wasn't cancelled // and was less than it, sleep for the remainder @@ -442,38 +530,25 @@ func (u *VU) RunOnce(ctx context.Context) error { } func (u *VU) runFn( - ctx context.Context, group *lib.Group, isDefault bool, fn goja.Callable, args ...goja.Value, + ctx context.Context, isDefault bool, fn goja.Callable, args ...goja.Value, ) (goja.Value, bool, time.Duration, error) { - cookieJar := u.CookieJar if !u.Runner.Bundle.Options.NoCookiesReset.ValueOrZero() { var err error - cookieJar, err = cookiejar.New(nil) + u.state.CookieJar, err = cookiejar.New(nil) if err != nil { return goja.Undefined(), false, time.Duration(0), err } } - state := &lib.State{ - Logger: u.Runner.Logger, - Options: u.Runner.Bundle.Options, - Group: group, - Transport: u.Transport, - Dialer: u.Dialer, - TLSConfig: u.TLSConfig, - CookieJar: cookieJar, - RPSLimit: u.Runner.RPSLimit, - BPool: u.BPool, - Vu: u.ID, - Samples: u.Samples, - Iteration: u.Iteration, + opts := &u.Runner.Bundle.Options + if opts.SystemTags.Has(stats.TagIter) { + u.state.Tags["iter"] = strconv.FormatInt(u.Iteration, 10) } - newctx := common.WithRuntime(ctx, u.Runtime) - newctx = lib.WithState(newctx, state) - *u.Context = newctx - + // TODO: this seems like the wrong place for the iteration incrementation + // also this means that teardown and setup have __ITER defined + // maybe move it to RunOnce ? 
u.Runtime.Set("__ITER", u.Iteration) - iter := u.Iteration u.Iteration++ startTime := time.Now() @@ -488,22 +563,11 @@ func (u *VU) runFn( isFullIteration = true } - tags := state.Options.RunTags.CloneTags() - if state.Options.SystemTags.Has(stats.TagVU) { - tags["vu"] = strconv.FormatInt(u.ID, 10) - } - if state.Options.SystemTags.Has(stats.TagIter) { - tags["iter"] = strconv.FormatInt(iter, 10) - } - if state.Options.SystemTags.Has(stats.TagGroup) { - tags["group"] = group.Path - } - if u.Runner.Bundle.Options.NoVUConnectionReuse.Bool { u.Transport.CloseIdleConnections() } - state.Samples <- u.Dialer.GetTrail(startTime, endTime, isFullIteration, isDefault, stats.IntoSampleTags(&tags)) + u.state.Samples <- u.Dialer.GetTrail(startTime, endTime, isFullIteration, isDefault, stats.NewSampleTags(u.state.Tags)) return v, isFullIteration, endTime.Sub(startTime), err } diff --git a/js/runner_test.go b/js/runner_test.go index 13f2c3a0eda..27276da42c2 100644 --- a/js/runner_test.go +++ b/js/runner_test.go @@ -31,12 +31,20 @@ import ( stdlog "log" "net" "net/http" + "net/url" "os" - "strings" "sync" "testing" "time" + "github.com/sirupsen/logrus" + + logtest "github.com/sirupsen/logrus/hooks/test" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/core" "github.com/loadimpact/k6/core/local" "github.com/loadimpact/k6/js/common" @@ -45,35 +53,34 @@ import ( k6metrics "github.com/loadimpact/k6/js/modules/k6/metrics" "github.com/loadimpact/k6/js/modules/k6/ws" "github.com/loadimpact/k6/lib" + _ "github.com/loadimpact/k6/lib/executor" // TODO: figure out something better "github.com/loadimpact/k6/lib/metrics" "github.com/loadimpact/k6/lib/testutils/httpmultibin" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/stats" "github.com/loadimpact/k6/stats/dummy" - logtest "github.com/sirupsen/logrus/hooks/test" - "github.com/spf13/afero" - 
"github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" ) func TestRunnerNew(t *testing.T) { t.Run("Valid", func(t *testing.T) { r, err := getSimpleRunner("/script.js", ` - let counter = 0; - export default function() { counter++; } + var counter = 0; + exports.default = function() { counter++; } `) assert.NoError(t, err) t.Run("NewVU", func(t *testing.T) { - vu, err := r.NewVU(make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) assert.NoError(t, err) - vuc, ok := vu.(*VU) + vuc, ok := initVU.(*VU) assert.True(t, ok) assert.Equal(t, int64(0), vuc.Runtime.Get("counter").Export()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) t.Run("RunOnce", func(t *testing.T) { - err = vu.RunOnce(context.Background()) + err = vu.RunOnce() assert.NoError(t, err) assert.Equal(t, int64(1), vuc.Runtime.Get("counter").Export()) }) @@ -87,7 +94,7 @@ func TestRunnerNew(t *testing.T) { } func TestRunnerGetDefaultGroup(t *testing.T) { - r1, err := getSimpleRunner("/script.js", `export default function() {};`) + r1, err := getSimpleRunner("/script.js", `exports.default = function() {};`) if assert.NoError(t, err) { assert.NotNil(t, r1.GetDefaultGroup()) } @@ -99,7 +106,7 @@ func TestRunnerGetDefaultGroup(t *testing.T) { } func TestRunnerOptions(t *testing.T) { - r1, err := getSimpleRunner("/script.js", `export default function() {};`) + r1, err := getSimpleRunner("/script.js", `exports.default = function() {};`) if !assert.NoError(t, err) { return } @@ -129,10 +136,10 @@ func TestOptionsSettingToScript(t *testing.T) { optionVariants := []string{ "", - "let options = null;", - "let options = undefined;", - "let options = {};", - "let options = {teardownTimeout: '1s'};", + "var options = null;", + "var options = undefined;", + "var options = {};", + "var options = {teardownTimeout: '1s'};", } 
for i, variant := range optionVariants { @@ -140,7 +147,7 @@ func TestOptionsSettingToScript(t *testing.T) { t.Run(fmt.Sprintf("Variant#%d", i), func(t *testing.T) { t.Parallel() data := variant + ` - export default function() { + exports.default = function() { if (!options) { throw new Error("Expected options to be defined!"); } @@ -148,7 +155,7 @@ func TestOptionsSettingToScript(t *testing.T) { throw new Error("expected teardownTimeout to be " + __ENV.expectedTeardownTimeout + " but it was " + options.teardownTimeout); } };` - r, err := getSimpleRunnerWithOptions("/script.js", data, + r, err := getSimpleRunner("/script.js", data, lib.RuntimeOptions{Env: map[string]string{"expectedTeardownTimeout": "4s"}}) require.NoError(t, err) @@ -157,9 +164,12 @@ func TestOptionsSettingToScript(t *testing.T) { require.Equal(t, newOptions, r.GetOptions()) samples := make(chan stats.SampleContainer, 100) - vu, err := r.NewVU(samples) + initVU, err := r.NewVU(1, samples) if assert.NoError(t, err) { - err := vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err := vu.RunOnce() assert.NoError(t, err) } }) @@ -169,8 +179,9 @@ func TestOptionsSettingToScript(t *testing.T) { func TestOptionsPropagationToScript(t *testing.T) { t.Parallel() data := ` - export let options = { setupTimeout: "1s", myOption: "test" }; - export default function() { + var options = { setupTimeout: "1s", myOption: "test" }; + exports.options = options; + exports.default = function() { if (options.external) { throw new Error("Unexpected property external!"); } @@ -183,7 +194,7 @@ func TestOptionsPropagationToScript(t *testing.T) { };` expScriptOptions := lib.Options{SetupTimeout: types.NullDurationFrom(1 * time.Second)} - r1, err := getSimpleRunnerWithOptions("/script.js", data, + r1, err := getSimpleRunner("/script.js", data, lib.RuntimeOptions{Env: 
map[string]string{"expectedSetupTimeout": "1s"}}) require.NoError(t, err) require.Equal(t, expScriptOptions, r1.GetOptions()) @@ -194,17 +205,21 @@ func TestOptionsPropagationToScript(t *testing.T) { require.Equal(t, expScriptOptions, r2.GetOptions()) newOptions := lib.Options{SetupTimeout: types.NullDurationFrom(3 * time.Second)} - r2.SetOptions(newOptions) + require.NoError(t, r2.SetOptions(newOptions)) require.Equal(t, newOptions, r2.GetOptions()) testdata := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range testdata { + r := r t.Run(name, func(t *testing.T) { samples := make(chan stats.SampleContainer, 100) - vu, err := r.NewVU(samples) + initVU, err := r.NewVU(1, samples) if assert.NoError(t, err) { - err := vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err := vu.RunOnce() assert.NoError(t, err) } }) @@ -216,11 +231,11 @@ func TestMetricName(t *testing.T) { defer tb.Cleanup() script := tb.Replacer.Replace(` - import { Counter } from "k6/metrics"; + var Counter = require("k6/metrics").Counter; - let myCounter = new Counter("not ok name @"); + var myCounter = new Counter("not ok name @"); - export default function(data) { + exports.default = function(data) { myCounter.add(1); } `) @@ -234,22 +249,26 @@ func TestSetupDataIsolation(t *testing.T) { defer tb.Cleanup() script := tb.Replacer.Replace(` - import { Counter } from "k6/metrics"; - - export let options = { - vus: 2, - vusMax: 10, - iterations: 500, - teardownTimeout: "1s", - setupTimeout: "1s", + var Counter = require("k6/metrics").Counter; + + exports.options = { + scenarios: { + shared_iters: { + executor: "shared-iterations", + vus: 5, + iterations: 500, + }, + }, + teardownTimeout: "5s", + setupTimeout: "5s", }; - let myCounter = new Counter("mycounter"); + var myCounter = new Counter("mycounter"); - export function setup() { + exports.setup = function() { 
return { v: 0 }; } - export default function(data) { + exports.default = function(data) { if (data.v !== __ITER) { throw new Error("default: wrong data for iter " + __ITER + ": " + JSON.stringify(data)); } @@ -257,7 +276,7 @@ func TestSetupDataIsolation(t *testing.T) { myCounter.add(1); } - export function teardown(data) { + exports.teardown = function(data) { if (data.v !== 0) { throw new Error("teardown: wrong data: " + data.v); } @@ -268,15 +287,23 @@ func TestSetupDataIsolation(t *testing.T) { runner, err := getSimpleRunner("/script.js", script) require.NoError(t, err) - engine, err := core.NewEngine(local.New(runner), runner.GetOptions()) + options := runner.GetOptions() + require.Empty(t, options.Validate()) + + execScheduler, err := local.NewExecutionScheduler(runner, logrus.StandardLogger()) + require.NoError(t, err) + engine, err := core.NewEngine(execScheduler, options, logrus.StandardLogger()) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + run, wait, err := engine.Init(ctx, ctx) require.NoError(t, err) collector := &dummy.Collector{} engine.Collectors = []lib.Collector{collector} - ctx, cancel := context.WithCancel(context.Background()) errC := make(chan error) - go func() { errC <- engine.Run(ctx) }() + go func() { errC <- run() }() select { case <-time.After(10 * time.Second): @@ -285,6 +312,7 @@ func TestSetupDataIsolation(t *testing.T) { case err := <-errC: cancel() require.NoError(t, err) + wait() require.False(t, engine.IsTainted()) } var count int @@ -308,15 +336,19 @@ func testSetupDataHelper(t *testing.T, data string) { testdata := map[string]*Runner{"Source": r1} for name, r := range testdata { + r := r t.Run(name, func(t *testing.T) { samples := make(chan stats.SampleContainer, 100) if !assert.NoError(t, r.Setup(context.Background(), samples)) { return } - vu, err := r.NewVU(samples) + initVU, err := r.NewVU(1, samples) if assert.NoError(t, err) { - err := vu.RunOnce(context.Background()) + ctx, cancel 
:= context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err := vu.RunOnce() assert.NoError(t, err) } }) @@ -325,17 +357,17 @@ func testSetupDataHelper(t *testing.T, data string) { func TestSetupDataReturnValue(t *testing.T) { testSetupDataHelper(t, ` - export let options = { setupTimeout: "1s", teardownTimeout: "1s" }; - export function setup() { + exports.options = { setupTimeout: "1s", teardownTimeout: "1s" }; + exports.setup = function() { return 42; } - export default function(data) { + exports.default = function(data) { if (data != 42) { throw new Error("default: wrong data: " + JSON.stringify(data)) } }; - export function teardown(data) { + exports.teardown = function(data) { if (data != 42) { throw new Error("teardown: wrong data: " + JSON.stringify(data)) } @@ -344,14 +376,14 @@ func TestSetupDataReturnValue(t *testing.T) { func TestSetupDataNoSetup(t *testing.T) { testSetupDataHelper(t, ` - export let options = { setupTimeout: "1s", teardownTimeout: "1s" }; - export default function(data) { + exports.options = { setupTimeout: "1s", teardownTimeout: "1s" }; + exports.default = function(data) { if (data !== undefined) { throw new Error("default: wrong data: " + JSON.stringify(data)) } }; - export function teardown(data) { + exports.teardown = function(data) { if (data !== undefined) { console.log(data); throw new Error("teardown: wrong data: " + JSON.stringify(data)) @@ -362,7 +394,7 @@ func TestSetupDataNoSetup(t *testing.T) { func TestConsoleInInitContext(t *testing.T) { r1, err := getSimpleRunner("/script.js", ` console.log("1"); - export default function(data) { + exports.default = function(data) { }; `) require.NoError(t, err) @@ -372,9 +404,12 @@ func TestConsoleInInitContext(t *testing.T) { r := r t.Run(name, func(t *testing.T) { samples := make(chan stats.SampleContainer, 100) - vu, err := r.NewVU(samples) + initVU, err := r.NewVU(1, samples) if assert.NoError(t, err) { - err := 
vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err := vu.RunOnce() assert.NoError(t, err) } }) @@ -383,15 +418,15 @@ func TestConsoleInInitContext(t *testing.T) { func TestSetupDataNoReturn(t *testing.T) { testSetupDataHelper(t, ` - export let options = { setupTimeout: "1s", teardownTimeout: "1s" }; - export function setup() { } - export default function(data) { + exports.options = { setupTimeout: "1s", teardownTimeout: "1s" }; + exports.setup = function() { } + exports.default = function(data) { if (data !== undefined) { throw new Error("default: wrong data: " + JSON.stringify(data)) } }; - export function teardown(data) { + exports.teardown = function(data) { if (data !== undefined) { throw new Error("teardown: wrong data: " + JSON.stringify(data)) } @@ -406,11 +441,12 @@ func TestRunnerIntegrationImports(t *testing.T) { "k6/metrics", "k6/html", } + rtOpts := lib.RuntimeOptions{CompatibilityMode: null.StringFrom("extended")} for _, mod := range modules { mod := mod t.Run(mod, func(t *testing.T) { t.Run("Source", func(t *testing.T) { - _, err := getSimpleRunner("/script.js", fmt.Sprintf(`import "%s"; export default function() {}`, mod)) + _, err := getSimpleRunner("/script.js", fmt.Sprintf(`import "%s"; exports.default = function() {}`, mod), rtOpts) assert.NoError(t, err) }) }) @@ -420,7 +456,7 @@ func TestRunnerIntegrationImports(t *testing.T) { t.Run("Files", func(t *testing.T) { fs := afero.NewMemMapFs() require.NoError(t, fs.MkdirAll("/path/to", 0755)) - require.NoError(t, afero.WriteFile(fs, "/path/to/lib.js", []byte(`export default "hi!";`), 0644)) + require.NoError(t, afero.WriteFile(fs, "/path/to/lib.js", []byte(`exports.default = "hi!";`), 0644)) testdata := map[string]struct{ filename, path string }{ "Absolute": {"/path/script.js", "/path/to/lib.js"}, @@ -432,9 +468,9 @@ func TestRunnerIntegrationImports(t *testing.T) { for 
name, data := range testdata { name, data := name, data t.Run(name, func(t *testing.T) { - r1, err := getSimpleRunnerWithFileFs(data.filename, fmt.Sprintf(` - import hi from "%s"; - export default function() { + r1, err := getSimpleRunner(data.filename, fmt.Sprintf(` + var hi = require("%s").default; + exports.default = function() { if (hi != "hi!") { throw new Error("incorrect value"); } }`, data.path), fs) require.NoError(t, err) @@ -446,9 +482,12 @@ func TestRunnerIntegrationImports(t *testing.T) { for name, r := range testdata { r := r t.Run(name, func(t *testing.T) { - vu, err := r.NewVU(make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) require.NoError(t, err) - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() require.NoError(t, err) }) } @@ -459,8 +498,8 @@ func TestRunnerIntegrationImports(t *testing.T) { func TestVURunContext(t *testing.T) { r1, err := getSimpleRunner("/script.js", ` - export let options = { vus: 10 }; - export default function() { fn(); } + exports.options = { vus: 10 }; + exports.default = function() { fn(); } `) require.NoError(t, err) r1.SetOptions(r1.GetOptions().Apply(lib.Options{Throw: null.BoolFrom(true)})) @@ -472,8 +511,9 @@ func TestVURunContext(t *testing.T) { testdata := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range testdata { + r := r t.Run(name, func(t *testing.T) { - vu, err := r.newVU(make(chan stats.SampleContainer, 100)) + vu, err := r.newVU(1, make(chan stats.SampleContainer, 100)) if !assert.NoError(t, err) { return } @@ -493,7 +533,10 @@ func TestVURunContext(t *testing.T) { assert.Equal(t, vu.Transport, state.Transport) } }) - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + activeVU := 
vu.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = activeVU.RunOnce() assert.NoError(t, err) assert.True(t, fnCalled, "fn() not called") }) @@ -507,7 +550,7 @@ func TestVURunInterrupt(t *testing.T) { } r1, err := getSimpleRunner("/script.js", ` - export default function() { while(true) {} } + exports.default = function() { while(true) {} } `) require.NoError(t, err) require.NoError(t, r1.SetOptions(lib.Options{Throw: null.BoolFrom(true)})) @@ -518,8 +561,6 @@ func TestVURunInterrupt(t *testing.T) { for name, r := range testdata { name, r := name, r t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() samples := make(chan stats.SampleContainer, 100) defer close(samples) go func() { @@ -527,12 +568,15 @@ func TestVURunInterrupt(t *testing.T) { } }() - vu, err := r.newVU(samples) + vu, err := r.newVU(1, samples) require.NoError(t, err) - err = vu.RunOnce(ctx) + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) + defer cancel() + activeVU := vu.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = activeVU.RunOnce() assert.Error(t, err) - assert.True(t, strings.HasPrefix(err.Error(), "context cancelled at ")) + assert.Contains(t, err.Error(), "context cancelled") }) } } @@ -544,7 +588,7 @@ func TestVURunInterruptDoesntPanic(t *testing.T) { } r1, err := getSimpleRunner("/script.js", ` - export default function() { while(true) {} } + exports.default = function() { while(true) {} } `) require.NoError(t, err) require.NoError(t, r1.SetOptions(lib.Options{Throw: null.BoolFrom(true)})) @@ -555,42 +599,44 @@ func TestVURunInterruptDoesntPanic(t *testing.T) { for name, r := range testdata { name, r := name, r t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Minute) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() samples := make(chan stats.SampleContainer, 100) - 
defer close(samples) go func() { for range samples { } }() var wg sync.WaitGroup - vu, err := r.newVU(samples) + initVU, err := r.newVU(1, samples) require.NoError(t, err) for i := 0; i < 1000; i++ { wg.Add(1) newCtx, newCancel := context.WithCancel(ctx) + vu := initVU.Activate(&lib.VUActivationParams{ + RunContext: newCtx, + DeactivateCallback: func(_ lib.InitializedVU) { wg.Done() }, + }) ch := make(chan struct{}) go func() { - defer wg.Done() close(ch) - vuErr := vu.RunOnce(newCtx) + vuErr := vu.RunOnce() assert.Error(t, vuErr) assert.Contains(t, vuErr.Error(), "context cancelled") }() <-ch time.Sleep(time.Millisecond * 1) // NOTE: increase this in case of problems ;) newCancel() + wg.Wait() } - wg.Wait() }) } } func TestVUIntegrationGroups(t *testing.T) { r1, err := getSimpleRunner("/script.js", ` - import { group } from "k6"; - export default function() { + var group = require("k6").group; + exports.default = function() { fnOuter(); group("my group", function() { fnInner(); @@ -609,7 +655,7 @@ func TestVUIntegrationGroups(t *testing.T) { for name, r := range testdata { r := r t.Run(name, func(t *testing.T) { - vu, err := r.newVU(make(chan stats.SampleContainer, 100)) + vu, err := r.newVU(1, make(chan stats.SampleContainer, 100)) if !assert.NoError(t, err) { return } @@ -634,7 +680,10 @@ func TestVUIntegrationGroups(t *testing.T) { assert.Equal(t, "my group", g.Parent.Name) assert.Equal(t, r.GetDefaultGroup(), g.Parent.Parent) }) - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + activeVU := vu.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = activeVU.RunOnce() assert.NoError(t, err) assert.True(t, fnOuterCalled, "fnOuter() not called") assert.True(t, fnInnerCalled, "fnInner() not called") @@ -645,10 +694,10 @@ func TestVUIntegrationGroups(t *testing.T) { func TestVUIntegrationMetrics(t *testing.T) { r1, err := getSimpleRunner("/script.js", ` - import { group } from "k6"; - import { 
Trend } from "k6/metrics"; - let myMetric = new Trend("my_metric"); - export default function() { myMetric.add(5); } + var group = require("k6").group; + var Trend = require("k6/metrics").Trend; + var myMetric = new Trend("my_metric"); + exports.default = function() { myMetric.add(5); } `) require.NoError(t, err) @@ -657,14 +706,18 @@ func TestVUIntegrationMetrics(t *testing.T) { testdata := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range testdata { + r := r t.Run(name, func(t *testing.T) { samples := make(chan stats.SampleContainer, 100) - vu, err := r.newVU(samples) + vu, err := r.newVU(1, samples) if !assert.NoError(t, err) { return } - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + activeVU := vu.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = activeVU.RunOnce() assert.NoError(t, err) sampleCount := 0 for i, sampleC := range stats.GetBufferedSamples(samples) { @@ -716,8 +769,8 @@ func TestVUIntegrationInsecureRequests(t *testing.T) { data := data t.Run(name, func(t *testing.T) { r1, err := getSimpleRunner("/script.js", ` - import http from "k6/http"; - export default function() { http.get("https://expired.badssl.com/"); } + var http = require("k6/http");; + exports.default = function() { http.get("https://expired.badssl.com/"); } `) require.NoError(t, err) require.NoError(t, r1.SetOptions(lib.Options{Throw: null.BoolFrom(true)}.Apply(data.opts))) @@ -726,14 +779,19 @@ func TestVUIntegrationInsecureRequests(t *testing.T) { require.NoError(t, err) runners := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range runners { + r := r t.Run(name, func(t *testing.T) { r.Logger, _ = logtest.NewNullLogger() - vu, err := r.NewVU(make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) if !assert.NoError(t, err) { return } - err = vu.RunOnce(context.Background()) + + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() if data.errMsg != "" { require.Error(t, err) assert.Contains(t, err.Error(), data.errMsg) @@ -748,8 +806,8 @@ func TestVUIntegrationInsecureRequests(t *testing.T) { func TestVUIntegrationBlacklistOption(t *testing.T) { r1, err := getSimpleRunner("/script.js", ` - import http from "k6/http"; - export default function() { http.get("http://10.1.2.3/"); } + var http = require("k6/http");; + exports.default = function() { http.get("http://10.1.2.3/"); } `) require.NoError(t, err) @@ -770,12 +828,16 @@ func TestVUIntegrationBlacklistOption(t *testing.T) { runners := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range runners { + r := r t.Run(name, func(t *testing.T) { - vu, err := r.NewVU(make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) if !assert.NoError(t, err) { return } - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() require.Error(t, err) assert.Contains(t, err.Error(), "IP (10.1.2.3) is in a blacklisted range (10.0.0.0/8)") }) @@ -784,14 +846,14 @@ func TestVUIntegrationBlacklistOption(t *testing.T) { func TestVUIntegrationBlacklistScript(t *testing.T) { r1, err := getSimpleRunner("/script.js", ` - import http from "k6/http"; + var http = require("k6/http");; - export let options = { + exports.options = { throw: true, blacklistIPs: ["10.0.0.0/8"], }; - export default function() { http.get("http://10.1.2.3/"); } + exports.default = function() { http.get("http://10.1.2.3/"); } `) if !assert.NoError(t, err) { return @@ -807,11 +869,14 @@ func TestVUIntegrationBlacklistScript(t *testing.T) { for name, r := range runners { r := r t.Run(name, func(t *testing.T) { - vu, err := r.NewVU(make(chan 
stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) if !assert.NoError(t, err) { return } - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() require.Error(t, err) assert.Contains(t, err.Error(), "IP (10.1.2.3) is in a blacklisted range (10.0.0.0/8)") }) @@ -824,12 +889,14 @@ func TestVUIntegrationHosts(t *testing.T) { r1, err := getSimpleRunner("/script.js", tb.Replacer.Replace(` - import { check, fail } from "k6"; - import http from "k6/http"; - export default function() { - let res = http.get("http://test.loadimpact.com:HTTPBIN_PORT/"); + var k6 = require("k6"); + var check = k6.check; + var fail = k6.fail; + var http = require("k6/http");; + exports.default = function() { + var res = http.get("http://test.loadimpact.com:HTTPBIN_PORT/"); check(res, { - "is correct IP": (r) => r.remote_ip === "127.0.0.1" + "is correct IP": function(r) { return r.remote_ip === "127.0.0.1" } }) || fail("failed to override dns"); } `)) @@ -851,13 +918,17 @@ func TestVUIntegrationHosts(t *testing.T) { runners := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range runners { + r := r t.Run(name, func(t *testing.T) { - vu, err := r.NewVU(make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) if !assert.NoError(t, err) { return } - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() if !assert.NoError(t, err) { return } @@ -906,8 +977,8 @@ func TestVUIntegrationTLSConfig(t *testing.T) { data := data t.Run(name, func(t *testing.T) { r1, err := getSimpleRunner("/script.js", ` - import http from "k6/http"; - export default function() { http.get("https://sha256.badssl.com/"); } + var 
http = require("k6/http");; + exports.default = function() { http.get("https://sha256.badssl.com/"); } `) if !assert.NoError(t, err) { return @@ -921,14 +992,18 @@ func TestVUIntegrationTLSConfig(t *testing.T) { runners := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range runners { + r := r t.Run(name, func(t *testing.T) { r.Logger, _ = logtest.NewNullLogger() - vu, err := r.NewVU(make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) if !assert.NoError(t, err) { return } - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() if data.errMsg != "" { require.Error(t, err) assert.Contains(t, err.Error(), data.errMsg) @@ -943,15 +1018,35 @@ func TestVUIntegrationTLSConfig(t *testing.T) { func TestVUIntegrationOpenFunctionError(t *testing.T) { r, err := getSimpleRunner("/script.js", ` - export default function() { open("/tmp/foo") } + exports.default = function() { open("/tmp/foo") } + `) + assert.NoError(t, err) + + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) + assert.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() + assert.Error(t, err) + assert.Contains(t, err.Error(), "only available in the init stage") +} + +func TestVUIntegrationOpenFunctionErrorWhenSneaky(t *testing.T) { + r, err := getSimpleRunner("/script.js", ` + var sneaky = open; + exports.default = function() { sneaky("/tmp/foo") } `) assert.NoError(t, err) - vu, err := r.NewVU(make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) assert.NoError(t, err) - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := 
initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() assert.Error(t, err) - assert.Contains(t, err.Error(), "only available to init code") + assert.Contains(t, err.Error(), "only available in the init stage") } func TestVUIntegrationCookiesReset(t *testing.T) { @@ -959,16 +1054,16 @@ func TestVUIntegrationCookiesReset(t *testing.T) { defer tb.Cleanup() r1, err := getSimpleRunner("/script.js", tb.Replacer.Replace(` - import http from "k6/http"; - export default function() { - let url = "HTTPBIN_URL"; - let preRes = http.get(url + "/cookies"); + var http = require("k6/http");; + exports.default = function() { + var url = "HTTPBIN_URL"; + var preRes = http.get(url + "/cookies"); if (preRes.status != 200) { throw new Error("wrong status (pre): " + preRes.status); } if (preRes.json().k1 || preRes.json().k2) { throw new Error("cookies persisted: " + preRes.body); } - let res = http.get(url + "/cookies/set?k2=v2&k1=v1"); + var res = http.get(url + "/cookies/set?k2=v2&k1=v1"); if (res.status != 200) { throw new Error("wrong status: " + res.status) } if (res.json().k1 != "v1" || res.json().k2 != "v2") { throw new Error("wrong cookies: " + res.body); @@ -991,13 +1086,17 @@ func TestVUIntegrationCookiesReset(t *testing.T) { runners := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range runners { + r := r t.Run(name, func(t *testing.T) { - vu, err := r.NewVU(make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) if !assert.NoError(t, err) { return } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) for i := 0; i < 2; i++ { - err = vu.RunOnce(context.Background()) + err = vu.RunOnce() assert.NoError(t, err) } }) @@ -1009,11 +1108,11 @@ func TestVUIntegrationCookiesNoReset(t *testing.T) { defer tb.Cleanup() r1, err := getSimpleRunner("/script.js", tb.Replacer.Replace(` - import http from 
"k6/http"; - export default function() { - let url = "HTTPBIN_URL"; + var http = require("k6/http");; + exports.default = function() { + var url = "HTTPBIN_URL"; if (__ITER == 0) { - let res = http.get(url + "/cookies/set?k2=v2&k1=v1"); + var res = http.get(url + "/cookies/set?k2=v2&k1=v1"); if (res.status != 200) { throw new Error("wrong status: " + res.status) } if (res.json().k1 != "v1" || res.json().k2 != "v2") { throw new Error("wrong cookies: " + res.body); @@ -1021,7 +1120,7 @@ func TestVUIntegrationCookiesNoReset(t *testing.T) { } if (__ITER == 1) { - let res = http.get(url + "/cookies"); + var res = http.get(url + "/cookies"); if (res.status != 200) { throw new Error("wrong status (pre): " + res.status); } if (res.json().k1 != "v1" || res.json().k2 != "v2") { throw new Error("wrong cookies: " + res.body); @@ -1046,16 +1145,20 @@ func TestVUIntegrationCookiesNoReset(t *testing.T) { runners := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range runners { + r := r t.Run(name, func(t *testing.T) { - vu, err := r.NewVU(make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) if !assert.NoError(t, err) { return } - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() assert.NoError(t, err) - err = vu.RunOnce(context.Background()) + err = vu.RunOnce() assert.NoError(t, err) }) } @@ -1063,7 +1166,7 @@ func TestVUIntegrationCookiesNoReset(t *testing.T) { func TestVUIntegrationVUID(t *testing.T) { r1, err := getSimpleRunner("/script.js", ` - export default function() { + exports.default = function() { if (__VU != 1234) { throw new Error("wrong __VU: " + __VU); } }`, ) @@ -1079,13 +1182,17 @@ func TestVUIntegrationVUID(t *testing.T) { runners := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range runners { + r := r t.Run(name, func(t 
*testing.T) { - vu, err := r.NewVU(make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1234, make(chan stats.SampleContainer, 100)) if !assert.NoError(t, err) { return } - assert.NoError(t, vu.Reconfigure(1234)) - err = vu.RunOnce(context.Background()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() assert.NoError(t, err) }) } @@ -1156,8 +1263,8 @@ func TestVUIntegrationClientCerts(t *testing.T) { go func() { _ = srv.Serve(listener) }() r1, err := getSimpleRunner("/script.js", fmt.Sprintf(` - import http from "k6/http"; - export default function() { http.get("https://%s")} + var http = require("k6/http");; + exports.default = function() { http.get("https://%s")} `, listener.Addr().String())) if !assert.NoError(t, err) { return @@ -1175,11 +1282,15 @@ func TestVUIntegrationClientCerts(t *testing.T) { runners := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range runners { + r := r t.Run(name, func(t *testing.T) { r.Logger, _ = logtest.NewNullLogger() - vu, err := r.NewVU(make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) if assert.NoError(t, err) { - err := vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err := vu.RunOnce() require.Error(t, err) assert.Contains(t, err.Error(), "remote error: tls: bad certificate") } @@ -1221,10 +1332,14 @@ func TestVUIntegrationClientCerts(t *testing.T) { runners := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range runners { + r := r t.Run(name, func(t *testing.T) { - vu, err := r.NewVU(make(chan stats.SampleContainer, 100)) + initVU, err := r.NewVU(1, make(chan stats.SampleContainer, 100)) if assert.NoError(t, err) { - err := vu.RunOnce(context.Background()) + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err := vu.RunOnce() assert.NoError(t, err) } }) @@ -1237,10 +1352,12 @@ func TestHTTPRequestInInitContext(t *testing.T) { defer tb.Cleanup() _, err := getSimpleRunner("/script.js", tb.Replacer.Replace(` - import { check, fail } from "k6"; - import http from "k6/http"; - let res = http.get("HTTPBIN_URL/"); - export default function() { + var k6 = require("k6"); + var check = k6.check; + var fail = k6.fail; + var http = require("k6/http");; + var res = http.get("HTTPBIN_URL/"); + exports.default = function() { console.log(test); } `)) @@ -1256,42 +1373,42 @@ func TestInitContextForbidden(t *testing.T) { table := [...][3]string{ { "http.request", - `import http from "k6/http"; - let res = http.get("HTTPBIN_URL"); - export default function() { console.log("p"); }`, + `var http = require("k6/http");; + var res = http.get("HTTPBIN_URL"); + exports.default = function() { console.log("p"); }`, k6http.ErrHTTPForbiddenInInitContext.Error(), }, { "http.batch", - `import http from "k6/http"; - let res = http.batch("HTTPBIN_URL/something", "HTTPBIN_URL/else"); - export default function() { console.log("p"); }`, + `var http = require("k6/http");; + var res = http.batch("HTTPBIN_URL/something", "HTTPBIN_URL/else"); + exports.default = function() { console.log("p"); }`, k6http.ErrBatchForbiddenInInitContext.Error(), }, { "http.cookieJar", - `import http from "k6/http"; - let jar = http.cookieJar(); - export default function() { console.log("p"); }`, + `var http = require("k6/http");; + var jar = http.cookieJar(); + exports.default = function() { console.log("p"); }`, k6http.ErrJarForbiddenInInitContext.Error(), }, { "check", - `import { check } from "k6"; - check("test", {'is test': (test) => test == "test"}) - export default function() { console.log("p"); }`, + `var check = require("k6").check; + check("test", {'is test': function(test) { return test == 
"test"}}) + exports.default = function() { console.log("p"); }`, k6.ErrCheckInInitContext.Error(), }, { "group", - `import { group } from "k6"; + `var group = require("k6").group; group("group1", function () { console.log("group1");}) - export default function() { console.log("p"); }`, + exports.default = function() { console.log("p"); }`, k6.ErrGroupInInitContext.Error(), }, { "ws", - `import ws from "k6/ws"; + `var ws = require("k6/ws"); var url = "ws://echo.websocket.org"; var params = { "tags": { "my_tag": "hello" } }; var response = ws.connect(url, params, function (socket) { @@ -1300,15 +1417,15 @@ func TestInitContextForbidden(t *testing.T) { }) }); - export default function() { console.log("p"); }`, + exports.default = function() { console.log("p"); }`, ws.ErrWSInInitContext.Error(), }, { "metric", - `import { Counter } from "k6/metrics"; - let counter = Counter("myCounter"); + `var Counter = require("k6/metrics").Counter; + var counter = Counter("myCounter"); counter.add(1); - export default function() { console.log("p"); }`, + exports.default = function() { console.log("p"); }`, k6metrics.ErrMetricsAddInInitContext.Error(), }, } @@ -1329,18 +1446,18 @@ func TestInitContextForbidden(t *testing.T) { } } -func TestArchiveRunningIntegraty(t *testing.T) { +func TestArchiveRunningIntegrity(t *testing.T) { tb := httpmultibin.NewHTTPMultiBin(t) defer tb.Cleanup() fs := afero.NewMemMapFs() data := tb.Replacer.Replace(` - let fput = open("/home/somebody/test.json"); - export let options = { setupTimeout: "10s", teardownTimeout: "10s" }; - export function setup() { + var fput = open("/home/somebody/test.json"); + exports.options = { setupTimeout: "10s", teardownTimeout: "10s" }; + exports.setup = function () { return JSON.parse(fput); } - export default function(data) { + exports.default = function(data) { if (data != 42) { throw new Error("incorrect answer " + data); } @@ -1348,7 +1465,7 @@ func TestArchiveRunningIntegraty(t *testing.T) { `) require.NoError(t, 
afero.WriteFile(fs, "/home/somebody/test.json", []byte(`42`), os.ModePerm)) require.NoError(t, afero.WriteFile(fs, "/script.js", []byte(data), os.ModePerm)) - r1, err := getSimpleRunnerWithFileFs("/script.js", data, fs) + r1, err := getSimpleRunner("/script.js", data, fs) require.NoError(t, err) buf := bytes.NewBuffer(nil) @@ -1361,13 +1478,17 @@ func TestArchiveRunningIntegraty(t *testing.T) { runners := map[string]*Runner{"Source": r1, "Archive": r2} for name, r := range runners { + r := r t.Run(name, func(t *testing.T) { ch := make(chan stats.SampleContainer, 100) err = r.Setup(context.Background(), ch) require.NoError(t, err) - vu, err := r.NewVU(ch) + initVU, err := r.NewVU(1, ch) require.NoError(t, err) - err = vu.RunOnce(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) + err = vu.RunOnce() require.NoError(t, err) }) } @@ -1379,10 +1500,9 @@ func TestArchiveNotPanicking(t *testing.T) { fs := afero.NewMemMapFs() require.NoError(t, afero.WriteFile(fs, "/non/existent", []byte(`42`), os.ModePerm)) - r1, err := getSimpleRunnerWithFileFs("/script.js", tb.Replacer.Replace(` - let fput = open("/non/existent"); - export default function(data) { - } + r1, err := getSimpleRunner("/script.js", tb.Replacer.Replace(` + var fput = open("/non/existent"); + exports.default = function(data) {} `), fs) require.NoError(t, err) @@ -1400,25 +1520,25 @@ func TestStuffNotPanicking(t *testing.T) { defer tb.Cleanup() r, err := getSimpleRunner("/script.js", tb.Replacer.Replace(` - import http from "k6/http"; - import ws from "k6/ws"; - import { group } from "k6"; - import { parseHTML } from "k6/html"; - - export let options = { iterations: 1, vus: 1, vusMax: 1 }; - - export default function() { - const doc = parseHTML(http.get("HTTPBIN_URL/html").body); - - let testCases = [ - () => group(), - () => group("test"), - () => group("test", "wat"), - () => doc.find('p').each(), 
- () => doc.find('p').each("wat"), - () => doc.find('p').map(), - () => doc.find('p').map("wat"), - () => ws.connect("WSBIN_URL/ws-echo"), + var http = require("k6/http"); + var ws = require("k6/ws"); + var group = require("k6").group; + var parseHTML = require("k6/html").parseHTML; + + exports.options = { iterations: 1, vus: 1, vusMax: 1 }; + + exports.default = function() { + var doc = parseHTML(http.get("HTTPBIN_URL/html").body); + + var testCases = [ + function() { return group()}, + function() { return group("test")}, + function() { return group("test", "wat")}, + function() { return doc.find('p').each()}, + function() { return doc.find('p').each("wat")}, + function() { return doc.find('p').map()}, + function() { return doc.find('p').map("wat")}, + function() { return ws.connect("WSBIN_URL/ws-echo")}, ]; testCases.forEach(function(fn, idx) { @@ -1441,12 +1561,13 @@ func TestStuffNotPanicking(t *testing.T) { require.NoError(t, err) ch := make(chan stats.SampleContainer, 1000) - vu, err := r.NewVU(ch) + initVU, err := r.NewVU(1, ch) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) + vu := initVU.Activate(&lib.VUActivationParams{RunContext: ctx}) errC := make(chan error) - go func() { errC <- vu.RunOnce(ctx) }() + go func() { errC <- vu.RunOnce() }() select { case <-time.After(15 * time.Second): @@ -1457,3 +1578,87 @@ func TestStuffNotPanicking(t *testing.T) { require.NoError(t, err) } } + +func TestSystemTags(t *testing.T) { + t.Parallel() + tb := httpmultibin.NewHTTPMultiBin(t) + defer tb.Cleanup() + + // Handle paths with custom logic + tb.Mux.HandleFunc("/wrong-redirect", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Location", "%") + w.WriteHeader(http.StatusTemporaryRedirect) + }) + + r, err := getSimpleRunner("/script.js", tb.Replacer.Replace(` + var http = require("k6/http"); + + exports.http_get = function() { + http.get("HTTPBIN_IP_URL"); + }; + exports.https_get = function() { + 
http.get("HTTPSBIN_IP_URL"); + }; + exports.bad_url_get = function() { + http.get("http://127.0.0.1:1"); + }; + exports.noop = function() {}; + `), lib.RuntimeOptions{CompatibilityMode: null.StringFrom("base")}) + require.NoError(t, err) + + httpURL, err := url.Parse(tb.ServerHTTP.URL) + require.NoError(t, err) + + testedSystemTags := []struct{ tag, exec, expVal string }{ + {"proto", "http_get", "HTTP/1.1"}, + {"status", "http_get", "200"}, + {"method", "http_get", "GET"}, + {"url", "http_get", tb.ServerHTTP.URL}, + {"url", "https_get", tb.ServerHTTPS.URL}, + {"ip", "http_get", httpURL.Hostname()}, + {"name", "http_get", tb.ServerHTTP.URL}, + {"group", "http_get", ""}, + {"vu", "http_get", "8"}, + {"vu", "noop", "9"}, + {"iter", "http_get", "0"}, + {"iter", "noop", "0"}, + {"tls_version", "https_get", "tls1.3"}, + {"ocsp_status", "https_get", "unknown"}, + {"error", "bad_url_get", `dial: connection refused`}, + {"error_code", "bad_url_get", "1212"}, + {"scenario", "http_get", "default"}, + //TODO: add more tests + } + + samples := make(chan stats.SampleContainer, 100) + for num, tc := range testedSystemTags { + num, tc := num, tc + t.Run(fmt.Sprintf("TC %d with only %s", num, tc.tag), func(t *testing.T) { + require.NoError(t, r.SetOptions(r.GetOptions().Apply(lib.Options{ + Throw: null.BoolFrom(false), + TLSVersion: &lib.TLSVersions{Max: lib.TLSVersion13}, + SystemTags: stats.ToSystemTagSet([]string{tc.tag}), + InsecureSkipTLSVerify: null.BoolFrom(true), + }))) + + vu, err := r.NewVU(int64(num), samples) + require.NoError(t, err) + activeVU := vu.Activate(&lib.VUActivationParams{ + RunContext: context.Background(), + Exec: tc.exec, + Scenario: "default", + }) + require.NoError(t, activeVU.RunOnce()) + + bufSamples := stats.GetBufferedSamples(samples) + assert.NotEmpty(t, bufSamples) + for _, sample := range bufSamples[0].GetSamples() { + assert.NotEmpty(t, sample.Tags) + for emittedTag, emittedVal := range sample.Tags.CloneTags() { + assert.Equal(t, tc.tag, 
emittedTag) + assert.Equal(t, tc.expVal, emittedVal) + } + } + }) + } +} diff --git a/lib/archive.go b/lib/archive.go index 69201c062cc..6671989b9a5 100644 --- a/lib/archive.go +++ b/lib/archive.go @@ -36,9 +36,10 @@ import ( "strings" "time" + "github.com/spf13/afero" + "github.com/loadimpact/k6/lib/fsext" "github.com/loadimpact/k6/loader" - "github.com/spf13/afero" ) //nolint: gochecknoglobals, lll @@ -107,56 +108,7 @@ func (arc *Archive) getFs(name string) afero.Fs { return fs } -// cleanUpWrongMetadataJSON fixes issues with the metadata.json contents before -// they are unmarshalled in the Archive struct. -// -// Currently, the only fix this function performs is the discarding of the -// derived `execution` config value in the consolidated options that was wrongly -// saved by k6 in the archive metadata.json files until commit -// 83193f8a96e06a190325b838b2cc451119d6b836. This basically means k6 v0.24.0 and -// surrounding master commits. We filter these out by the value of the k6version -// property, saved in the metadata.json since the previous to the above commit. -func cleanUpWrongMetadataJSON(data []byte) ([]byte, error) { - var tmpArc map[string]interface{} - if err := json.Unmarshal(data, &tmpArc); err != nil { - return nil, err - } - - k6Version := "" - if k6RawVersion, ok := tmpArc["k6version"]; ok { - if k6Version, ok = k6RawVersion.(string); !ok { - return nil, fmt.Errorf("k6version is present in the archive metadata, but it's not a string") - } - } - - // TODO: semantically parse the k6version and compare it with the current - // one, log a warning if the current k6 version in lib/consts is lower than - // the k6 version that generated the archive. 
- - if k6Version != "" && k6Version != "0.24.0" { - return data, nil - } - - if rawOptions, ok := tmpArc["options"]; !ok { - return nil, fmt.Errorf("missing options key in the archive metadata.json") - } else if options, ok := rawOptions.(map[string]interface{}); !ok { - return nil, fmt.Errorf("wrong options type in metadata.json") - } else if _, hasExecution := options["execution"]; !hasExecution { - return data, nil // no need to fix anything - } else { - delete(options, "execution") - tmpArc["options"] = options - } - - return json.Marshal(tmpArc) -} - -func (arc *Archive) loadMetadataJSON(data []byte) error { - data, err := cleanUpWrongMetadataJSON(data) - if err != nil { - return err - } - +func (arc *Archive) loadMetadataJSON(data []byte) (err error) { if err = json.Unmarshal(data, &arc); err != nil { return err } diff --git a/lib/archive_test.go b/lib/archive_test.go index 3bcdbd55273..740927ee345 100644 --- a/lib/archive_test.go +++ b/lib/archive_test.go @@ -30,14 +30,14 @@ import ( "runtime" "testing" - "github.com/loadimpact/k6/lib/consts" - "github.com/loadimpact/k6/lib/fsext" - "github.com/loadimpact/k6/stats" - "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib/consts" + "github.com/loadimpact/k6/lib/fsext" + "github.com/loadimpact/k6/stats" ) func TestNormalizeAndAnonymizePath(t *testing.T) { diff --git a/js/compiler/compatibility_mode_gen.go b/lib/compatibility_mode_gen.go similarity index 98% rename from js/compiler/compatibility_mode_gen.go rename to lib/compatibility_mode_gen.go index 19d46d1876f..75b62e6de4b 100644 --- a/js/compiler/compatibility_mode_gen.go +++ b/lib/compatibility_mode_gen.go @@ -1,7 +1,7 @@ // Code generated by "enumer -type=CompatibilityMode -transform=snake -trimprefix CompatibilityMode -output compatibility_mode_gen.go"; DO NOT EDIT. 
// -package compiler +package lib import ( "fmt" diff --git a/lib/consts/consts.go b/lib/consts/consts.go index 1229d223723..c4d5ce92792 100644 --- a/lib/consts/consts.go +++ b/lib/consts/consts.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package consts import ( @@ -8,12 +28,12 @@ import ( ) // Version contains the current semantic version of k6. -var Version = "0.26.2" //nolint:gochecknoglobals +const Version = "0.27.0-dev" // VersionDetails can be set externally as part of the build process var VersionDetails = "" // nolint:gochecknoglobals -// FullVersion returns the maximully full version and build information for +// FullVersion returns the maximally full version and build information for // the currently running k6 executable. 
func FullVersion() string { goVersionArch := fmt.Sprintf("%s, %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH) @@ -29,7 +49,7 @@ func FullVersion() string { } // Banner contains the ASCII-art banner with the k6 logo and stylized website URL -//TODO: make these into methods, only the version needs to be a variable +// TODO: make these into methods, only the version needs to be a variable //nolint:gochecknoglobals var Banner = strings.Join([]string{ ` /\ |‾‾| /‾‾/ /‾/ `, diff --git a/js/modules/k6/http/tls_go_1_11_test.go b/lib/consts/js.go similarity index 81% rename from js/modules/k6/http/tls_go_1_11_test.go rename to lib/consts/js.go index 7c34dfd356b..55603629cf8 100644 --- a/js/modules/k6/http/tls_go_1_11_test.go +++ b/lib/consts/js.go @@ -1,9 +1,7 @@ -// +build !go1.12 - /* * * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact + * Copyright (C) 2020 Load Impact * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as @@ -20,6 +18,12 @@ * */ -package http +package consts -const expectedTLSVersion = "tls1.2" +// JS constants +const ( + DefaultFn = "default" + Options = "options" + SetupFn = "setup" + TeardownFn = "teardown" +) diff --git a/lib/context.go b/lib/context.go index 9c626072c9a..a7544e4de4b 100644 --- a/lib/context.go +++ b/lib/context.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package lib import "context" diff --git a/lib/context_test.go b/lib/context_test.go index 8cd12b7a9ce..35dbc4ea6c6 100644 --- a/lib/context_test.go +++ b/lib/context_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package lib import ( diff --git a/lib/doc.go b/lib/doc.go index 4853c84ab0b..d653a9e24f5 100644 --- a/lib/doc.go +++ b/lib/doc.go @@ -18,12 +18,13 @@ * */ -// Package lib is a kitchen sink of... basically anything that doesn't belong in a specific part of -// the codebase, ranging from utility functions to universal types to core interfaces. +// Package lib is a kitchen sink of... basically anything that doesn't belong in +// a specific part of the codebase, ranging from utility functions to universal +// types to core interfaces. // -// Some of the things in lib do not actually belong in lib, most notably the Executor and Runner, -// which arguably belong in core. Other things are in files that are far too long, or that do not -// actually make sense. 
+// Some of the things in lib do not actually belong in lib, and need to be moved +// into either submodules, or separate modules like core. Other things are in +// files that are far too long, or that do not actually make sense. // // Feel free to move these things. package lib diff --git a/lib/execution.go b/lib/execution.go new file mode 100644 index 00000000000..8a0a3666bec --- /dev/null +++ b/lib/execution.go @@ -0,0 +1,608 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package lib + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/loadimpact/k6/stats" +) + +// An ExecutionScheduler is in charge of initializing executors and using them +// to initialize and schedule VUs created by a wrapped Runner. It decouples how +// a swarm of VUs is controlled from the details of how or even where they're +// scheduled. +// +// The core/local execution scheduler schedules VUs on the local machine, but +// the same interface may be implemented to control a test running on a cluster +// or in the cloud. +// +// TODO: flesh out the interface after actually having more than one +// implementation... +type ExecutionScheduler interface { + // Returns the wrapped runner. 
May return nil if not applicable, eg. + // if we're remote controlling a test running on another machine. + GetRunner() Runner + + // Return the ExecutionState instance from which different statistics for the + // current state of the runner could be retrieved. + GetState() *ExecutionState + + // Return the instances of the configured executors + GetExecutors() []Executor + + // Init initializes all executors, including all of their needed VUs. + Init(ctx context.Context, samplesOut chan<- stats.SampleContainer) error + + // Run the ExecutionScheduler, funneling the generated metric samples + // through the supplied out channel. + Run(globalCtx, runCtx context.Context, samplesOut chan<- stats.SampleContainer) error + + // Pause a test, or start/resume it. To check if a test is paused, use + // GetState().IsPaused(). + // + // Currently, any executor, so any test, can be started in a paused state. + // This will cause k6 to initialize all needed VUs, but it won't actually + // start the test. Later, the test can be started for real by + // resuming/unpausing it from the REST API. + // + // After a test is actually started, it may become impossible to pause it + // again. That is denoted by having SetPaused(true) return an error. The + // likely cause is that some of the executors for the test don't support + // pausing after the test has been started. + // + // IMPORTANT: Currently only the externally controlled executor can be + // paused and resumed multiple times in the middle of the test execution! + // Even then, "pausing" is a bit misleading, since k6 won't pause in the + // middle of the currently executing iterations. It will allow the currently + // in progress iterations to finish, and it just won't start any new ones + // nor will it increment the value returned by GetCurrentTestRunDuration(). 
+ SetPaused(paused bool) error +} + +// MaxTimeToWaitForPlannedVU specifies the maximum allowable time for an executor +// to wait for a planned VU to be retrieved from the ExecutionState.PlannedVUs +// buffer. If it's exceeded, k6 will emit a warning log message, since it either +// means that there's a bug in the k6 scheduling code, or that the machine is +// overloaded and the scheduling code suffers from delays. +// +// Critically, exceeding this time *doesn't* result in an aborted test or any +// test errors, and the executor will continue to try and borrow the VU +// (potentially resulting in further warnings). We likely should emit a k6 +// metric about it in the future. TODO: emit a metric every time this is +// exceeded? +const MaxTimeToWaitForPlannedVU = 400 * time.Millisecond + +// MaxRetriesGetPlannedVU how many times we should wait for +// MaxTimeToWaitForPlannedVU before we actually return an error. +const MaxRetriesGetPlannedVU = 5 + +// ExecutionStatus is similar to RunStatus, but more fine grained and concerns +// only local execution. +//go:generate enumer -type=ExecutionStatus -trimprefix ExecutionStatus -output execution_status_gen.go +type ExecutionStatus uint32 + +// Possible execution status values +const ( + ExecutionStatusCreated ExecutionStatus = iota + ExecutionStatusInitVUs + ExecutionStatusInitExecutors + ExecutionStatusInitDone + ExecutionStatusPausedBeforeRun + ExecutionStatusStarted + ExecutionStatusSetup + ExecutionStatusRunning + ExecutionStatusTeardown + ExecutionStatusEnded +) + +// ExecutionState contains a few different things: +// - Some convenience items, that are needed by all executors, like the +// execution segment and the unique VU ID generator. By keeping those here, +// we can just pass the ExecutionState to the different executors, instead of +// individually passing them each item. +// - Mutable counters that different executors modify and other parts of +// k6 can read, e.g. 
for the vus and vus_max metrics k6 emits every second. +// - Pausing controls and statistics. +// +// The counters and timestamps here are primarily meant to be used for +// information extraction and avoidance of ID collisions. Using many of the +// counters here for synchronization between VUs could result in HIDDEN data +// races, because the Go data race detector can't detect any data races +// involving atomics... +// +// The only functionality intended for synchronization is the one revolving +// around pausing, and uninitializedUnplannedVUs for restricting the number of +// unplanned VUs being initialized. +type ExecutionState struct { + // A copy of the options, so the different executors have access to them. + // They will need to access things like the current execution segment, the + // per-run metrics tags, etc. + // + // Obviously, they are not meant to be changed... They should be a constant + // during the execution of a single test, but we can't easily enforce that + // via the Go type system... + Options Options + + ExecutionTuple *ExecutionTuple // TODO Rename, possibly move + + // vus is the shared channel buffer that contains all of the VUs that have + // been initialized and aren't currently being used by a executor. + // + // It contains both pre-initialized (i.e. planned) VUs, as well as any + // unplanned VUs. Planned VUs are initialized before a test begins, while + // unplanned VUS can be initialized in the middle of the test run by a + // executor and have been relinquished after it has finished working with + // them. Usually, unplanned VUs are initialized by one of the arrival-rate + // executors, after they have exhausted their PreAllocatedVUs. After the + // executor is done with the VUs, it will put in this channel, so it could + // potentially be reused by other executors further along in the test. + // + // Different executors cooperatively borrow VUs from here when they are + // needed and return them when they are done with them. 
There's no central + // enforcement of correctness, i.e. that a executor takes more VUs from + // here than its execution plan has stipulated. The correctness guarantee + // lies with the actual executors - bugs in one can affect others. + // + // That's why the field is private and we force executors to use the + // GetPlannedVU(), GetUnplannedVU(), and ReturnVU() methods instead of work + // directly with the channel. These methods will emit a warning or can even + // return an error if retrieving a VU takes more than + // MaxTimeToWaitForPlannedVU. + vus chan InitializedVU + + // The current VU ID, used for the __VU execution context variable. Use the + // GetUniqueVUIdentifier() to get unique values for each VU, starting from 1 + // (for backwards compatibility...) + currentVUIdentifier *uint64 + + // TODO: add something similar, but for iterations? Currently, there isn't + // a straightforward way to get a unique sequential identifier per iteration + // in the context of a single k6 instance. Combining __VU and __ITER gives us + // a unique identifier, but it's unwieldy and somewhat cumbersome. + + // Total number of currently initialized VUs. Generally equal to + // currentVUIdentifier minus 1, since initializedVUs starts from 0 and is + // incremented only after a VU is initialized, while CurrentVUIdentifier is + // incremented before a VU is initialized. It should always be greater than + // or equal to 0, but int64 is used for simplification of the used atomic + // arithmetic operations. + initializedVUs *int64 + + // Total number of unplanned VUs we haven't initialized yet. It starts + // being equal to GetMaxPossibleVUs(executionPlan)-GetMaxPlannedVUs(), and + // may stay that way if no unplanned VUs are initialized. Once it reaches 0, + // no more unplanned VUs can be initialized. + uninitializedUnplannedVUs *int64 + + // Injected when the execution scheduler's Init function is called, used for + // initializing unplanned VUs. 
+ initVUFunc InitVUFunc + + // The number of VUs that are currently executing the test script. This also + // includes any VUs that are in the process of gracefully winding down, + // either at the end of the test, or when VUs are ramping down. It should + // always be greater than or equal to 0, but int64 is used for + // simplification of the used atomic arithmetic operations. + activeVUs *int64 + + // The total number of full (i.e uninterrupted) iterations that have been + // completed so far. + fullIterationsCount *uint64 + + // The total number of iterations that have been interrupted during their + // execution. The potential interruption causes vary - end of a specified + // script `duration`, scaling down of VUs via `stages`, a user hitting + // Ctrl+C, change of `vus` via the externally controlled executor's REST + // API, etc. + interruptedIterationsCount *uint64 + + // A machine-readable indicator in which the current state of the test + // execution is currently stored. Useful for the REST API and external + // observability of the k6 test run progress. + executionStatus *uint32 + + // A nanosecond UNIX timestamp that is set when the test is actually + // started. The default 0 value is used to denote that the test hasn't + // started yet... + startTime *int64 + + // A nanosecond UNIX timestamp that is set when the test ends, either + // by an early context cancel or at its regularly scheduled time. + // The default 0 value is used to denote that the test hasn't ended yet. + endTime *int64 + + // Stuff related to pausing follows. Read the docs in ExecutionScheduler for + // more information regarding how pausing works in k6. + // + // When we pause the execution in the middle of the test, we save the + // current timestamp in currentPauseTime. When we resume the execution, we + // set currentPauseTime back to 0 and we add the (time.Now() - + // currentPauseTime) duration to totalPausedDuration (unless the test hasn't + // started yet). 
+ // + // Thus, the algorithm for GetCurrentTestRunDuration() is very + // straightforward: + // - if the test hasn't started, return 0 + // - set endTime to: + // - the current pauseTime, if not zero + // - time.Now() otherwise + // - return (endTime - startTime - totalPausedDuration) + // + // Quickly checking for IsPaused() just means comparing the currentPauseTime + // with 0, a single atomic operation. + // + // But if we want to wait until a script resumes, or be notified of the + // start/resume event from a channel (as part of a select{}), we have to + // acquire the pauseStateLock, get the current resumeNotify instance, + // release the lock and wait to read from resumeNotify (when it's closed by + // Resume()). + currentPauseTime *int64 + pauseStateLock sync.RWMutex + totalPausedDuration time.Duration // only modified behind the lock + resumeNotify chan struct{} +} + +// NewExecutionState initializes all of the pointers in the ExecutionState +// with zeros. It also makes sure that the initial state is unpaused, by +// setting resumeNotify to an already closed channel. 
+func NewExecutionState(options Options, et *ExecutionTuple, maxPlannedVUs, maxPossibleVUs uint64) *ExecutionState { + resumeNotify := make(chan struct{}) + close(resumeNotify) // By default the ExecutionState starts unpaused + + maxUnplannedUninitializedVUs := int64(maxPossibleVUs - maxPlannedVUs) + + return &ExecutionState{ + Options: options, + vus: make(chan InitializedVU, maxPossibleVUs), + + executionStatus: new(uint32), + currentVUIdentifier: new(uint64), + initializedVUs: new(int64), + uninitializedUnplannedVUs: &maxUnplannedUninitializedVUs, + activeVUs: new(int64), + fullIterationsCount: new(uint64), + interruptedIterationsCount: new(uint64), + startTime: new(int64), + endTime: new(int64), + currentPauseTime: new(int64), + pauseStateLock: sync.RWMutex{}, + totalPausedDuration: 0, // Accessed only behind the pauseStateLock + resumeNotify: resumeNotify, + ExecutionTuple: et, + } +} + +// GetUniqueVUIdentifier returns an auto-incrementing unique VU ID, used for __VU. +// It starts from 1 (for backwards compatibility...) +func (es *ExecutionState) GetUniqueVUIdentifier() uint64 { + return atomic.AddUint64(es.currentVUIdentifier, 1) +} + +// GetInitializedVUsCount returns the total number of currently initialized VUs. +// +// Important: this doesn't include any temporary/service VUs that are destroyed +// after they are used. These are created for the initial retrieval of the +// exported script options and for the execution of setup() and teardown() +// +// IMPORTANT: for UI/information purposes only, don't use for synchronization. +func (es *ExecutionState) GetInitializedVUsCount() int64 { + return atomic.LoadInt64(es.initializedVUs) +} + +// ModInitializedVUsCount changes the total number of currently initialized VUs. +// +// IMPORTANT: for UI/information purposes only, don't use for synchronization. 
+func (es *ExecutionState) ModInitializedVUsCount(mod int64) int64 { + return atomic.AddInt64(es.initializedVUs, mod) +} + +// GetCurrentlyActiveVUsCount returns the number of VUs that are currently +// executing the test script. This also includes any VUs that are in the process +// of gracefully winding down. +// +// IMPORTANT: for UI/information purposes only, don't use for synchronization. +func (es *ExecutionState) GetCurrentlyActiveVUsCount() int64 { + return atomic.LoadInt64(es.activeVUs) +} + +// ModCurrentlyActiveVUsCount changes the total number of currently active VUs. +// +// IMPORTANT: for UI/information purposes only, don't use for synchronization. +func (es *ExecutionState) ModCurrentlyActiveVUsCount(mod int64) int64 { + return atomic.AddInt64(es.activeVUs, mod) +} + +// GetFullIterationCount returns the total of full (i.e uninterrupted) iterations +// that have been completed so far. +// +// IMPORTANT: for UI/information purposes only, don't use for synchronization. +func (es *ExecutionState) GetFullIterationCount() uint64 { + return atomic.LoadUint64(es.fullIterationsCount) +} + +// AddFullIterations increments the number of full (i.e uninterrupted) iterations +// by the provided amount. +// +// IMPORTANT: for UI/information purposes only, don't use for synchronization. +func (es *ExecutionState) AddFullIterations(count uint64) uint64 { + return atomic.AddUint64(es.fullIterationsCount, count) +} + +// GetPartialIterationCount returns the total of partial (i.e interrupted) +// iterations that have been completed so far. +// +// IMPORTANT: for UI/information purposes only, don't use for synchronization. +func (es *ExecutionState) GetPartialIterationCount() uint64 { + return atomic.LoadUint64(es.interruptedIterationsCount) +} + +// AddInterruptedIterations increments the number of partial (i.e interrupted) +// iterations by the provided amount. +// +// IMPORTANT: for UI/information purposes only, don't use for synchronization. 
+func (es *ExecutionState) AddInterruptedIterations(count uint64) uint64 {
+	return atomic.AddUint64(es.interruptedIterationsCount, count)
+}
+
+// SetExecutionStatus changes the current execution status to the supplied value
+// and returns the current value.
+func (es *ExecutionState) SetExecutionStatus(newStatus ExecutionStatus) (oldStatus ExecutionStatus) {
+	return ExecutionStatus(atomic.SwapUint32(es.executionStatus, uint32(newStatus)))
+}
+
+// GetCurrentExecutionStatus returns the current execution status. Don't use
+// this for synchronization unless you've made the k6 behavior somewhat
+// predictable with options like --paused or --linger.
+func (es *ExecutionState) GetCurrentExecutionStatus() ExecutionStatus {
+	return ExecutionStatus(atomic.LoadUint32(es.executionStatus))
+}
+
+// MarkStarted saves the current timestamp as the test start time.
+//
+// CAUTION: Calling MarkStarted() a second time for the same execution state will
+// result in a panic!
+func (es *ExecutionState) MarkStarted() {
+	if !atomic.CompareAndSwapInt64(es.startTime, 0, time.Now().UnixNano()) {
+		panic("the execution scheduler was started a second time")
+	}
+	es.SetExecutionStatus(ExecutionStatusStarted)
+}
+
+// MarkEnded saves the current timestamp as the test end time.
+//
+// CAUTION: Calling MarkEnded() a second time for the same execution state will
+// result in a panic!
+func (es *ExecutionState) MarkEnded() {
+	if !atomic.CompareAndSwapInt64(es.endTime, 0, time.Now().UnixNano()) {
+		panic("the execution scheduler was stopped a second time")
+	}
+	es.SetExecutionStatus(ExecutionStatusEnded)
+}
+
+// HasStarted returns true if the test has actually started executing.
+// It will return false while a test is in the init phase, or if it has
+// been initially paused. But it will return true if a test is paused
+// midway through its execution (see above for details regarding the
+// feasibility of that pausing for normal executors). 
+func (es *ExecutionState) HasStarted() bool { + return atomic.LoadInt64(es.startTime) != 0 +} + +// HasEnded returns true if the test has finished executing. It will return +// false until MarkEnded() is called. +func (es *ExecutionState) HasEnded() bool { + return atomic.LoadInt64(es.endTime) != 0 +} + +// IsPaused quickly returns whether the test is currently paused, by reading +// the atomic currentPauseTime timestamp +func (es *ExecutionState) IsPaused() bool { + return atomic.LoadInt64(es.currentPauseTime) != 0 +} + +// GetCurrentTestRunDuration returns the duration for which the test has already +// ran. If the test hasn't started yet, that's 0. If it has started, but has +// been paused midway through, it will return the time up until the pause time. +// And if it's currently running, it will return the time since the start time. +// +// IMPORTANT: for UI/information purposes only, don't use for synchronization. +func (es *ExecutionState) GetCurrentTestRunDuration() time.Duration { + startTime := atomic.LoadInt64(es.startTime) + if startTime == 0 { + // The test hasn't started yet + return 0 + } + + es.pauseStateLock.RLock() + endTime := atomic.LoadInt64(es.endTime) + pausedDuration := es.totalPausedDuration + es.pauseStateLock.RUnlock() + + if endTime == 0 { + pauseTime := atomic.LoadInt64(es.currentPauseTime) + if pauseTime != 0 { + endTime = pauseTime + } else { + // The test isn't paused or finished, use the current time instead + endTime = time.Now().UnixNano() + } + } + + return time.Duration(endTime-startTime) - pausedDuration +} + +// Pause pauses the current execution. It acquires the lock, writes +// the current timestamp in currentPauseTime, and makes a new +// channel for resumeNotify. +// Pause can return an error if the test was already paused. 
+func (es *ExecutionState) Pause() error {
+	es.pauseStateLock.Lock()
+	defer es.pauseStateLock.Unlock()
+
+	if !atomic.CompareAndSwapInt64(es.currentPauseTime, 0, time.Now().UnixNano()) {
+		return errors.New("test execution was already paused")
+	}
+	es.resumeNotify = make(chan struct{})
+	return nil
+}
+
+// Resume unpauses the test execution. Unless the test wasn't
+// yet started, it calculates the duration between now and
+// the old currentPauseTime and adds it to totalPausedDuration.
+// Resume will emit an error if the test wasn't paused.
+func (es *ExecutionState) Resume() error {
+	es.pauseStateLock.Lock()
+	defer es.pauseStateLock.Unlock()
+
+	currentPausedTime := atomic.SwapInt64(es.currentPauseTime, 0)
+	if currentPausedTime == 0 {
+		return errors.New("test execution wasn't paused")
+	}
+
+	// Check that it's not the pause before execution actually starts
+	if atomic.LoadInt64(es.startTime) != 0 {
+		es.totalPausedDuration += time.Duration(time.Now().UnixNano() - currentPausedTime)
+	}
+
+	close(es.resumeNotify)
+
+	return nil
+}
+
+// ResumeNotify returns a channel which will be closed (i.e. could
+// be read from) as soon as the test execution is resumed.
+//
+// Since tests would likely be paused only rarely, unless you
+// directly need to be notified via a channel that the test
+// isn't paused or that it has resumed, it's probably a good
+// idea to first use the IsPaused() method, since it will be much
+// faster.
+//
+// And, since tests won't be paused most of the time, it's
+// probably better to check for that like this:
+// if executionState.IsPaused() {
+// <-executionState.ResumeNotify()
+// }
+func (es *ExecutionState) ResumeNotify() <-chan struct{} {
+	es.pauseStateLock.RLock()
+	defer es.pauseStateLock.RUnlock()
+	return es.resumeNotify
+}
+
+// GetPlannedVU tries to get a pre-initialized VU from the buffer channel. 
This +// shouldn't fail and should generally be an instantaneous action, but if it +// doesn't happen for MaxTimeToWaitForPlannedVU (for example, because the system +// is overloaded), a warning will be printed. If we reach that timeout more than +// MaxRetriesGetPlannedVU number of times, this function will return an error, +// since we either have a bug with some executor, or the machine is very, very +// overloaded. +// +// If modifyActiveVUCount is true, the method would also increment the counter +// for active VUs. In most cases, that's the desired behavior, but some +// executors might have to retrieve their reserved VUs without using them +// immediately - for example, the externally-controlled executor when the +// configured maxVUs number is greater than the configured starting VUs. +func (es *ExecutionState) GetPlannedVU(logger *logrus.Entry, modifyActiveVUCount bool) (InitializedVU, error) { + for i := 1; i <= MaxRetriesGetPlannedVU; i++ { + select { + case vu := <-es.vus: + if modifyActiveVUCount { + es.ModCurrentlyActiveVUsCount(+1) + } + // TODO: set environment and exec + return vu, nil + case <-time.After(MaxTimeToWaitForPlannedVU): + logger.Warnf("Could not get a VU from the buffer for %s", time.Duration(i)*MaxTimeToWaitForPlannedVU) + } + } + return nil, fmt.Errorf( + "could not get a VU from the buffer in %s", + MaxRetriesGetPlannedVU*MaxTimeToWaitForPlannedVU, + ) +} + +// SetInitVUFunc is called by the execution scheduler's init function, and it's +// used for setting the "constructor" function used for the initializing +// unplanned VUs. +// +// TODO: figure out a better dependency injection method? +func (es *ExecutionState) SetInitVUFunc(initVUFunc InitVUFunc) { + es.initVUFunc = initVUFunc +} + +// GetUnplannedVU checks if any unplanned VUs remain to be initialized, and if +// they do, it initializes one and returns it. If all unplanned VUs have already +// been initialized, it returns one from the global vus buffer. 
+// +// IMPORTANT: GetUnplannedVU() doesn't do any checking if the requesting +// executor is actually allowed to have the VU at this particular time. +// Executors are trusted to correctly declare their needs (via their +// GetExecutionRequirements() methods) and then to never ask for more VUs than +// they have specified in those requirements. +func (es *ExecutionState) GetUnplannedVU(ctx context.Context, logger *logrus.Entry) (InitializedVU, error) { + remVUs := atomic.AddInt64(es.uninitializedUnplannedVUs, -1) + if remVUs < 0 { + logger.Debug("Reusing a previously initialized unplanned VU") + atomic.AddInt64(es.uninitializedUnplannedVUs, 1) + return es.GetPlannedVU(logger, true) + } + + logger.Debug("Initializing an unplanned VU, this may affect test results") + return es.InitializeNewVU(ctx, logger) +} + +// InitializeNewVU creates and returns a brand new VU, updating the relevant +// tracking counters. +func (es *ExecutionState) InitializeNewVU(ctx context.Context, logger *logrus.Entry) (InitializedVU, error) { + if es.initVUFunc == nil { + return nil, fmt.Errorf("initVUFunc wasn't set in the execution state") + } + newVU, err := es.initVUFunc(ctx, logger) + if err != nil { + return nil, err + } + es.ModInitializedVUsCount(+1) + return newVU, err +} + +// AddInitializedVU is a helper function that adds VUs into the buffer and +// increases the initialized VUs counter. +func (es *ExecutionState) AddInitializedVU(vu InitializedVU) { + es.vus <- vu + es.ModInitializedVUsCount(+1) +} + +// ReturnVU is a helper function that puts VUs back into the buffer and +// decreases the active VUs counter. 
+func (es *ExecutionState) ReturnVU(vu InitializedVU, wasActive bool) {
+	es.vus <- vu
+	if wasActive {
+		es.ModCurrentlyActiveVUsCount(-1)
+	}
+}
diff --git a/lib/execution_segment.go b/lib/execution_segment.go
new file mode 100644
index 00000000000..081ef985bfb
--- /dev/null
+++ b/lib/execution_segment.go
@@ -0,0 +1,732 @@
+/*
+ *
+ * k6 - a next-generation load testing tool
+ * Copyright (C) 2019 Load Impact
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ *
+ */
+
+package lib
+
+import (
+	"encoding"
+	"fmt"
+	"math/big"
+	"sort"
+	"strings"
+)
+
+// ExecutionSegment represents a (start, end] partition of the total execution
+// work for a specific test. For example, if we want to split the execution of a
+// test in 2 different parts, we can split it in two segments (0, 0.5] and (0.5, 1].
+//
+// We use rational numbers so it's easier to verify the correctness and easier to
+// reason about portions of indivisible things, like VUs. This way, we can easily
+// split a test in thirds (i.e. (0, 1/3], (1/3, 2/3], (2/3, 1]), without fearing
+// that we'll lose a VU along the way...
+//
+// The most important part is that if work is split between multiple k6 instances,
+// each k6 instance can precisely and reproducibly calculate its share of the work,
+// just by knowing its own segment. 
There won't be a need to schedule the +// execution from a master node, or to even know how many other k6 instances are +// running! +type ExecutionSegment struct { + // 0 <= from < to <= 1 + from *big.Rat + to *big.Rat + + // derived, equals to-from, but pre-calculated here for speed + length *big.Rat +} + +// Ensure we implement those interfaces +var ( + _ encoding.TextUnmarshaler = &ExecutionSegment{} + _ fmt.Stringer = &ExecutionSegment{} +) + +// Helpful "constants" so we don't initialize them in every function call +var ( + zeroRat, oneRat = big.NewRat(0, 1), big.NewRat(1, 1) //nolint:gochecknoglobals + oneBigInt, twoBigInt = big.NewInt(1), big.NewInt(2) //nolint:gochecknoglobals +) + +// NewExecutionSegment validates the supplied arguments (basically, that 0 <= +// from < to <= 1) and either returns an error, or it returns a +// fully-initialized and usable execution segment. +func NewExecutionSegment(from, to *big.Rat) (*ExecutionSegment, error) { + if from.Cmp(zeroRat) < 0 { + return nil, fmt.Errorf("segment start value should be at least 0 but was %s", from.FloatString(2)) + } + if from.Cmp(to) >= 0 { + return nil, fmt.Errorf("segment start(%s) should be less than its end(%s)", from.FloatString(2), to.FloatString(2)) + } + if to.Cmp(oneRat) > 0 { + return nil, fmt.Errorf("segment end value shouldn't be more than 1 but was %s", to.FloatString(2)) + } + return newExecutionSegment(from, to), nil +} + +// newExecutionSegment just creates an ExecutionSegment without validating the arguments +func newExecutionSegment(from, to *big.Rat) *ExecutionSegment { + return &ExecutionSegment{ + from: from, + to: to, + length: new(big.Rat).Sub(to, from), + } +} + +// stringToRat is a helper function that tries to convert a string to a rational +// number while allowing percentage, decimal, and fraction values. 
+func stringToRat(s string) (*big.Rat, error) { + if strings.HasSuffix(s, "%") { + num, ok := new(big.Int).SetString(strings.TrimSuffix(s, "%"), 10) + if !ok { + return nil, fmt.Errorf("'%s' is not a valid percentage", s) + } + return new(big.Rat).SetFrac(num, big.NewInt(100)), nil + } + rat, ok := new(big.Rat).SetString(s) + if !ok { + return nil, fmt.Errorf("'%s' is not a valid percentage, decimal, fraction or interval value", s) + } + return rat, nil +} + +// NewExecutionSegmentFromString validates the supplied string value and returns +// the newly created ExecutionSegment or an error from it. +// +// We are able to parse both single percentage/float/fraction values, and actual +// (from: to] segments. For the single values, we just treat them as the +// beginning segment - thus the execution segment can be used as a shortcut for +// quickly running an arbitrarily scaled-down version of a test. +// +// The parsing logic is that values with a colon, i.e. ':', are full segments: +// `1/2:3/4`, `0.5:0.75`, `50%:75%`, and even `2/4:75%` should be (1/2, 3/4] +// And values without a colon are the end of a first segment: +// `20%`, `0.2`, and `1/5` should be converted to (0, 1/5] +// empty values should probably be treated as "1", i.e. the whole execution +func NewExecutionSegmentFromString(toStr string) (result *ExecutionSegment, err error) { + from := zeroRat + if toStr == "" { + toStr = "1" // an empty string means a full 0:1 execution segment + } + if strings.ContainsRune(toStr, ':') { + fromToStr := strings.SplitN(toStr, ":", 2) + toStr = fromToStr[1] + if from, err = stringToRat(fromToStr[0]); err != nil { + return nil, err + } + } + + to, err := stringToRat(toStr) + if err != nil { + return nil, err + } + + return NewExecutionSegment(from, to) +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface, so that +// execution segments can be specified as CLI flags, environment variables, and +// JSON strings. 
It is a wrapper for the NewExecutionSegmentFromString() constructor. +func (es *ExecutionSegment) UnmarshalText(text []byte) (err error) { + segment, err := NewExecutionSegmentFromString(string(text)) + if err != nil { + return err + } + *es = *segment + return nil +} + +func (es *ExecutionSegment) String() string { + if es == nil { + return "0:1" + } + return es.from.RatString() + ":" + es.to.RatString() +} + +// MarshalText implements the encoding.TextMarshaler interface, so is used for +// text and JSON encoding of the execution segment. +func (es *ExecutionSegment) MarshalText() ([]byte, error) { + if es == nil { + return nil, nil + } + return []byte(es.String()), nil +} + +// FloatLength is a helper method for getting some more human-readable +// information about the execution segment. +func (es *ExecutionSegment) FloatLength() float64 { + if es == nil { + return 1.0 + } + res, _ := es.length.Float64() + return res +} + +// Split evenly divides the execution segment into the specified number of +// equal consecutive execution sub-segments. +func (es *ExecutionSegment) Split(numParts int64) ([]*ExecutionSegment, error) { + if numParts < 1 { + return nil, fmt.Errorf("the number of parts should be at least 1, %d received", numParts) + } + + from, to := zeroRat, oneRat + if es != nil { + from, to = es.from, es.to + } + + increment := new(big.Rat).Sub(to, from) + increment.Denom().Mul(increment.Denom(), big.NewInt(numParts)) + + results := make([]*ExecutionSegment, numParts) + for i := int64(0); i < numParts; i++ { + segmentTo := new(big.Rat).Add(from, increment) + segment, err := NewExecutionSegment(from, segmentTo) + if err != nil { + return nil, err + } + results[i] = segment + from = segmentTo + } + + if from.Cmp(to) != 0 { + return nil, fmt.Errorf("expected %s and %s to be equal", from, to) + } + + return results, nil +} + +// Equal returns true only if the two execution segments have the same from and +// to values. 
+func (es *ExecutionSegment) Equal(other *ExecutionSegment) bool { + if es == other { + return true + } + thisFrom, otherFrom, thisTo, otherTo := zeroRat, zeroRat, oneRat, oneRat + if es != nil { + thisFrom, thisTo = es.from, es.to + } + if other != nil { + otherFrom, otherTo = other.from, other.to + } + return thisFrom.Cmp(otherFrom) == 0 && thisTo.Cmp(otherTo) == 0 +} + +// SubSegment returns a new execution sub-segment - if a is (1/2:1] and b is +// (0:1/2], then a.SubSegment(b) will return a new segment (1/2, 3/4]. +// +// The basic formula for c = a.SubSegment(b) is: +// c.from = a.from + b.from * (a.to - a.from) +// c.to = c.from + (b.to - b.from) * (a.to - a.from) +func (es *ExecutionSegment) SubSegment(child *ExecutionSegment) *ExecutionSegment { + if child == nil { + return es // 100% sub-segment is the original segment + } + + parentFrom, parentLength := zeroRat, oneRat + if es != nil { + parentFrom, parentLength = es.from, es.length + } + + resultFrom := new(big.Rat).Mul(parentLength, child.from) + resultFrom.Add(resultFrom, parentFrom) + + resultLength := new(big.Rat).Mul(parentLength, child.length) + return &ExecutionSegment{ + from: resultFrom, + length: resultLength, + to: new(big.Rat).Add(resultFrom, resultLength), + } +} + +// helper function for rounding (up) of rational numbers to big.Int values +func roundUp(rat *big.Rat) *big.Int { + quo, rem := new(big.Int).QuoRem(rat.Num(), rat.Denom(), new(big.Int)) + + if rem.Mul(rem, twoBigInt).Cmp(rat.Denom()) >= 0 { + return quo.Add(quo, oneBigInt) + } + return quo +} + +// Scale proportionally scales the supplied value, according to the execution +// segment's position and size of the work. +func (es *ExecutionSegment) Scale(value int64) int64 { + if es == nil { // no execution segment, i.e. 
 100% + return value + } + // Instead of the first proposal that used remainders and floor: + // floor( (value * from) % 1 + value * length ) + // We're using an alternative approach with rounding that (hopefully) has + // the same properties, but it's simpler and has better precision: + // round( (value * from) - round(value * from) + (value * (to - from)) )? + // which reduces to: + // round( (value * to) - round(value * from) )? + + toValue := big.NewRat(value, 1) + toValue.Mul(toValue, es.to) + + fromValue := big.NewRat(value, 1) + fromValue.Mul(fromValue, es.from) + + toValue.Sub(toValue, new(big.Rat).SetFrac(roundUp(fromValue), oneBigInt)) + + return roundUp(toValue).Int64() +} + +// InPlaceScaleRat scales rational numbers in-place - it changes the passed +// argument (and also returns it, to allow for chaining, like many other big.Rat +// methods). +func (es *ExecutionSegment) InPlaceScaleRat(value *big.Rat) *big.Rat { + if es == nil { // no execution segment, i.e. 100% + return value + } + return value.Mul(value, es.length) +} + +// CopyScaleRat scales rational numbers without changing them - creates a new +// big.Rat object and uses it for the calculation. +func (es *ExecutionSegment) CopyScaleRat(value *big.Rat) *big.Rat { + if es == nil { // no execution segment, i.e. 100% + return value + } + return new(big.Rat).Mul(value, es.length) +} + +// ExecutionSegmentSequence represents an ordered chain of execution segments, +// where the end of one segment is the beginning of the next. It can be serialized +// as a comma-separated string of rational numbers "r1,r2,r3,...,rn", which +// represents the sequence (r1, r2], (r2, r3], (r3, r4], ..., (r{n-1}, rn]. +// The empty value should be treated as if there is a single (0, 1] segment. +type ExecutionSegmentSequence []*ExecutionSegment + +// NewExecutionSegmentSequence validates that the supplied execution +// segments are non-overlapping and without gaps. 
It will return a new execution +// segment sequence if that is true, and an error if it's not. +func NewExecutionSegmentSequence(segments ...*ExecutionSegment) (ExecutionSegmentSequence, error) { + if len(segments) > 1 { + to := segments[0].to + for i, segment := range segments[1:] { + if segment.from.Cmp(to) != 0 { + return nil, fmt.Errorf( + "the start value %s of segment #%d should be equal to the end value of the previous one, but it is %s", + segment.from, i+1, to, + ) + } + to = segment.to + } + } + return ExecutionSegmentSequence(segments), nil +} + +// NewExecutionSegmentSequenceFromString parses strings of the format +// "r1,r2,r3,...,rn", which represents the sequences like (r1, r2], (r2, r3], +// (r3, r4], ..., (r{n-1}, rn]. +func NewExecutionSegmentSequenceFromString(strSeq string) (ExecutionSegmentSequence, error) { + if len(strSeq) == 0 { + return nil, nil + } + + points := strings.Split(strSeq, ",") + if len(points) < 2 { + return nil, fmt.Errorf("at least 2 points are needed for an execution segment sequence, %d given", len(points)) + } + var start *big.Rat + + segments := make([]*ExecutionSegment, 0, len(points)-1) + for i, point := range points { + rat, err := stringToRat(point) + if err != nil { + return nil, err + } + if i == 0 { + start = rat + continue + } + + segment, err := NewExecutionSegment(start, rat) + if err != nil { + return nil, err + } + segments = append(segments, segment) + start = rat + } + + return NewExecutionSegmentSequence(segments...) +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface, so that +// execution segment sequences can be specified as CLI flags, environment +// variables, and JSON strings. 
+func (ess *ExecutionSegmentSequence) UnmarshalText(text []byte) (err error) { + seq, err := NewExecutionSegmentSequenceFromString(string(text)) + if err != nil { + return err + } + *ess = seq + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface, so is used for +// text and JSON encoding of the execution segment sequences. +func (ess ExecutionSegmentSequence) MarshalText() ([]byte, error) { + return []byte(ess.String()), nil +} + +// String just implements the fmt.Stringer interface, encoding the sequence of +// segments as "start1,end1,end2,end3,...,endn". +func (ess ExecutionSegmentSequence) String() string { + result := make([]string, 0, len(ess)+1) + for i, s := range ess { + if i == 0 { + result = append(result, s.from.RatString()) + } + result = append(result, s.to.RatString()) + } + return strings.Join(result, ",") +} + +// LCD calculates the lowest common denominator of the sequence. +// https://en.wikipedia.org/wiki/Least_common_multiple#Using_the_greatest_common_divisor +func (ess ExecutionSegmentSequence) LCD() int64 { + acc := ess[0].length.Denom().Int64() + var n int64 + for _, seg := range ess[1:] { + n = seg.length.Denom().Int64() + if acc == n || acc%n == 0 { // short circuit + continue + } + acc *= (n / gcd(acc, n)) + } + + return acc +} + +// Greatest common divisor +// https://en.wikipedia.org/wiki/Euclidean_algorithm +func gcd(a, b int64) int64 { + for a != b { + if a > b { + a -= b + } else { + b -= a + } + } + return a +} + +// IsFull returns whether the sequences is full, that is, whether it starts at 0 +// and ends at 1. Use GetFilledExecutionSegmentSequence() to get a full sequence. +func (ess ExecutionSegmentSequence) IsFull() bool { + return ess != nil && len(ess) != 0 && ess[0].from.Cmp(zeroRat) == 0 && ess[len(ess)-1].to.Cmp(oneRat) == 0 +} + +// FindSegmentPosition returns the index of the supplied execution segment in +// the sequence, or an error if the segment isn't present. 
This shouldn't be +// used on a nil or empty sequence, it's best to use this method on the result +// of GetFilledExecutionSegmentSequence(). +func (ess ExecutionSegmentSequence) FindSegmentPosition(segment *ExecutionSegment) (int, error) { + from := zeroRat + if segment != nil { + from = segment.from + } + index := sort.Search(len(ess), func(i int) bool { + return ess[i].from.Cmp(from) >= 0 + }) + + if index < 0 || index >= len(ess) || !ess[index].Equal(segment) { + return -1, fmt.Errorf("couldn't find segment %s in sequence %s", segment, ess) + } + return index, nil +} + +// GetFilledExecutionSegmentSequence makes sure we don't have any gaps in the +// given execution segment sequence, or a nil one. It makes sure that the whole +// 0-1 range is filled. +func GetFilledExecutionSegmentSequence( + sequence *ExecutionSegmentSequence, fallback *ExecutionSegment, +) (result ExecutionSegmentSequence) { + if sequence == nil || len(*sequence) == 0 { + if fallback == nil || fallback.length.Cmp(oneRat) == 0 { + // There is no sequence or a segment, so it means the whole test run + // is being planned/executed. So we make sure not to have a nil + // sequence, returning a full; "0,1" sequence instead, otherwise we + // will need to check for nil everywhere... + return ExecutionSegmentSequence{newExecutionSegment(zeroRat, oneRat)} + } + // We don't have a sequence, but we have a defined segment, so we + // fill around it with the missing pieces for a full sequence. + result = ExecutionSegmentSequence{fallback} + } else { + result = *sequence + } + + if result[0].from.Cmp(zeroRat) != 0 { + es := newExecutionSegment(zeroRat, result[0].from) + result = append(ExecutionSegmentSequence{es}, result...) 
+ } + + if result[len(result)-1].to.Cmp(oneRat) != 0 { + es := newExecutionSegment(result[len(result)-1].to, oneRat) + result = append(result, es) + } + return result +} + +// ExecutionSegmentSequenceWrapper is a caching layer on top of the execution +// segment sequence that allows us to make fast and useful calculations, after +// a somewhat slow initialization. +type ExecutionSegmentSequenceWrapper struct { + ExecutionSegmentSequence // a filled-out segment sequence + lcd int64 // pre-calculated least common denominator + + // The striped offsets, i.e. the repeating indexes that "belong" to each + // execution segment in the sequence. + offsets [][]int64 +} + +// NewExecutionSegmentSequenceWrapper expects a filled-out execution segment +// sequence. It pre-calculates the initial caches of and returns a new +// ExecutionSegmentSequenceWrapper, but doesn't calculate the striped offsets. +func NewExecutionSegmentSequenceWrapper(ess ExecutionSegmentSequence) *ExecutionSegmentSequenceWrapper { + if !ess.IsFull() { + panic(fmt.Sprintf("Cannot wrap around a non-full execution segment sequence '%s'", ess)) + } + + sequenceLength := len(ess) + offsets := make([][]int64, sequenceLength) + lcd := ess.LCD() + + // This will contain the normalized numerator values (i.e. what they would have + // been if all denominators were equal to the LCD), sorted in descending + // order (i.e. biggest segments are first), with references to their actual + // indexes in the execution segment sequence (i.e. `seq` above). 
+ sortedNormalizedIndexes := make([]struct { + normNumerator int64 + originalIndex int + }, sequenceLength) + + for i := range ess { + normalizedNumerator := ess[i].length.Num().Int64() * (lcd / ess[i].length.Denom().Int64()) + sortedNormalizedIndexes[i].normNumerator = normalizedNumerator + sortedNormalizedIndexes[i].originalIndex = i + offsets[i] = make([]int64, 0, normalizedNumerator+1) + } + + sort.SliceStable(sortedNormalizedIndexes, func(i, j int) bool { + return sortedNormalizedIndexes[i].normNumerator > sortedNormalizedIndexes[j].normNumerator + }) + + // This is the striping algorithm. Imagine you have a number of rational + // numbers which all add up to 1 (or less), and call them segments. If you + // want each to get proportional amount of anything, you need to give them + // their numerator count of elements for each denominator amount from the + // original elements. So, for 1/3, you give 1 element for each 3 elements. + // For 3/5 - 3 elements for each 5. If you have, for example, a sequence + // with elements with length 3/5 and 1/3, in order to know how to distribute + // it accurately, you need to get the LCD(lowest common denominitor). In + // this case, between 3 and 5, the LCD is 15. Then to transform the numbers + // to have the same, LCD equal, denominator. So 3/5 becomes 9/15 and 1/3 + // becomes 5/15. So now for each 15 elements 9 need to go to the 3/5, and 5 + // need to go to 1/3. This is what we did above in sortedNormalizedIndexes. + // + // We use the algorithm below to split elements between ExecutionSegments by + // using their length as the rational number. As we would like to get + // non-sequential elements, we try to get the maximum distance between them. + // That is the number of elements divided by the number of elements for any + // given segment, which concidently is the length of the segment reversed. + // The algorithm below does the following: + // 1. Goes through the elements from 0 to the lcd-1 + // 2. 
For each of element, it goes through the segments and looks if the + // amount of already taken elements by the given segment, multiplied by + // that segment's length inverted, is equal to or less to the current + // element index. If it is, give that element to that segment. If not, + // continue with the next element. + // The code below specifically avoids using big.Rat, for performance + // reasons, which complicates the code somewhat. As additional note, the + // sorting of the segments from biggest to smallest helps with the fact that + // the biggest elements will need to take the most elements, and for them it + // will be the hardest to not get sequential elements. + prev := make([]int64, sequenceLength) + chosenCounts := make([]int64, sequenceLength) + saveIndex := func(iteration int64, index int, numerator int64) { + offsets[index] = append(offsets[index], iteration-prev[index]) + prev[index] = iteration + if int64(len(offsets[index])) == numerator { + offsets[index] = append(offsets[index], offsets[index][0]+lcd-iteration) + } + } + for i := int64(0); i < lcd; i++ { + for sortedIndex, chosenCount := range chosenCounts { + num := chosenCount * lcd + denom := sortedNormalizedIndexes[sortedIndex].normNumerator + if i > num/denom || (i == num/denom && num%denom == 0) { + chosenCounts[sortedIndex]++ + saveIndex(i, sortedNormalizedIndexes[sortedIndex].originalIndex, denom) + break + } + } + } + + return &ExecutionSegmentSequenceWrapper{ExecutionSegmentSequence: ess, lcd: lcd, offsets: offsets} +} + +// LCD returns the (cached) least common denominator of the sequence - no need +// to calculate it again, since we did it in the constructor. +func (essw *ExecutionSegmentSequenceWrapper) LCD() int64 { + return essw.lcd +} + +// ScaleInt64 scales the provided value for the given segment. 
+func (essw *ExecutionSegmentSequenceWrapper) ScaleInt64(segmentIndex int, value int64) int64 { + start := essw.offsets[segmentIndex][0] + offsets := essw.offsets[segmentIndex][1:] + result := (value / essw.lcd) * int64(len(offsets)) + for gi, i := 0, start; i < value%essw.lcd; gi, i = gi+1, i+offsets[gi] { + result++ + } + return result +} + +// GetStripedOffsets returns the striped offsets for the given segment +// the returned values are as follows in order: +// - start: the first value that is for the segment +// - offsets: a list of offsets from the previous value for the segment. These are only the offsets +// from the start to the next start if we chunk the elements we are going to strip +// into lcd sized chunks +// - lcd: the LCD of the lengths of all segments in the sequence. This is also the number of +// elements after which the algorithm starts to loop and give the same values +func (essw *ExecutionSegmentSequenceWrapper) GetStripedOffsets(segmentIndex int) (int64, []int64, int64) { + offsets := essw.offsets[segmentIndex] + return offsets[0], offsets[1:], essw.lcd +} + +// GetTuple returns an ExecutionTuple for the specified segment index. +func (essw *ExecutionSegmentSequenceWrapper) GetTuple(segmentIndex int) *ExecutionTuple { + return &ExecutionTuple{ + Sequence: essw, + Segment: essw.ExecutionSegmentSequence[segmentIndex], + SegmentIndex: segmentIndex, + } +} + +// GetNewExecutionSegmentSequenceFromValue uses the value provided, splits it +// between all the segments, using the striping offsets in the sequence, +// generating a new segment sequence. It then returns a new +// ExecutionSegmentSequenceWrapper, with the new sequence and segments, such +// that each new segment in the new sequence has length `Scale(value)/value` +// while keeping the order. +// +// Additionally, the position of a given segment index can be tracked (since +// empty segments are removed), so that you can reconstruct an ExecutionTuple, +// if required. 
If the segment with the trackedIndex is not part of the new +// sequence, or if a new sequence cannot be generated (for example, for 0 +// values), an error will be returned. +func (essw *ExecutionSegmentSequenceWrapper) GetNewExecutionSegmentSequenceFromValue(value int64, trackedIndex int) ( + newSequence *ExecutionSegmentSequenceWrapper, newIndex int, err error, +) { + if value < 1 { + return nil, -1, fmt.Errorf("cannot generate new sequence for value %d", value) + } + + if value%essw.lcd == 0 { // the value is perfectly divisible so we will get the same tuple + return essw, trackedIndex, nil + } + + newIndex = -1 + newESS := make(ExecutionSegmentSequence, 0, len(essw.ExecutionSegmentSequence)) // this can be smaller + + prev := int64(0) + for i := range essw.ExecutionSegmentSequence { + newValue := essw.ScaleInt64(i, value) + if newValue == 0 { + continue + } + currentES := newExecutionSegment(big.NewRat(prev, value), big.NewRat(prev+newValue, value)) + prev += newValue + if i == trackedIndex { + newIndex = len(newESS) + } + newESS = append(newESS, currentES) + } + + if newIndex == -1 { + return nil, -1, fmt.Errorf( + "segment %d (%s) isn't present in the new sequence", + trackedIndex, essw.ExecutionSegmentSequence[trackedIndex], + ) + } + + return NewExecutionSegmentSequenceWrapper(newESS), newIndex, nil +} + +// ExecutionTuple is the combination of an ExecutionSegmentSequence(Wrapper) and +// a specific ExecutionSegment from it. It gives easy access to the efficient +// scaling and striping algorithms for that specific segment, since the results +// are cached in the sequence wrapper. +type ExecutionTuple struct { // TODO rename? make fields private and have getter methods? 
+ Sequence *ExecutionSegmentSequenceWrapper + Segment *ExecutionSegment + SegmentIndex int +} + +func (et *ExecutionTuple) String() string { + return fmt.Sprintf("%s in %s", et.Segment, et.Sequence) +} + +// NewExecutionTuple returns a new ExecutionTuple for the provided segment and +// sequence. +// +// TODO: don't return a pointer? +func NewExecutionTuple(segment *ExecutionSegment, sequence *ExecutionSegmentSequence) (*ExecutionTuple, error) { + filledSeq := GetFilledExecutionSegmentSequence(sequence, segment) + wrapper := NewExecutionSegmentSequenceWrapper(filledSeq) + index, err := wrapper.FindSegmentPosition(segment) + if err != nil { + return nil, err + } + return &ExecutionTuple{Sequence: wrapper, Segment: segment, SegmentIndex: index}, nil +} + +// ScaleInt64 scales the provided value for our execution segment. +func (et *ExecutionTuple) ScaleInt64(value int64) int64 { + if len(et.Sequence.ExecutionSegmentSequence) == 1 { + return value // if we don't have any segmentation, just return the original value + } + return et.Sequence.ScaleInt64(et.SegmentIndex, value) +} + +// GetStripedOffsets returns the striped offsets for our execution segment. +func (et *ExecutionTuple) GetStripedOffsets() (int64, []int64, int64) { + return et.Sequence.GetStripedOffsets(et.SegmentIndex) +} + +// GetNewExecutionTupleFromValue re-segments the sequence, based on the given +// value (see GetNewExecutionSegmentSequenceFromValue() above), and either +// returns the new tuple, or an error if the current segment isn't present in +// the new sequence. 
+func (et *ExecutionTuple) GetNewExecutionTupleFromValue(value int64) (*ExecutionTuple, error) { + newSequenceWrapper, newIndex, err := et.Sequence.GetNewExecutionSegmentSequenceFromValue(value, et.SegmentIndex) + if err != nil { + return nil, err + } + return &ExecutionTuple{ + Sequence: newSequenceWrapper, + Segment: newSequenceWrapper.ExecutionSegmentSequence[newIndex], + SegmentIndex: newIndex, + }, nil +} diff --git a/lib/execution_segment_test.go b/lib/execution_segment_test.go new file mode 100644 index 00000000000..202e34cc87e --- /dev/null +++ b/lib/execution_segment_test.go @@ -0,0 +1,926 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package lib + +import ( + "fmt" + "math/big" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func stringToES(t *testing.T, str string) *ExecutionSegment { + es := new(ExecutionSegment) + require.NoError(t, es.UnmarshalText([]byte(str))) + return es +} + +func TestExecutionSegmentEquals(t *testing.T) { + t.Parallel() + + t.Run("nil segment to full", func(t *testing.T) { + var nilEs *ExecutionSegment + fullEs := stringToES(t, "0:1") + require.True(t, nilEs.Equal(fullEs)) + require.True(t, fullEs.Equal(nilEs)) + }) + + t.Run("To it's self", func(t *testing.T) { + es := stringToES(t, "1/2:2/3") + require.True(t, es.Equal(es)) + }) +} + +func TestExecutionSegmentNew(t *testing.T) { + t.Parallel() + t.Run("from is below zero", func(t *testing.T) { + _, err := NewExecutionSegment(big.NewRat(-1, 1), big.NewRat(1, 1)) + require.Error(t, err) + }) + t.Run("to is more than 1", func(t *testing.T) { + _, err := NewExecutionSegment(big.NewRat(0, 1), big.NewRat(2, 1)) + require.Error(t, err) + }) + t.Run("from is smaller than to", func(t *testing.T) { + _, err := NewExecutionSegment(big.NewRat(1, 2), big.NewRat(1, 3)) + require.Error(t, err) + }) + + t.Run("from is equal to 'to'", func(t *testing.T) { + _, err := NewExecutionSegment(big.NewRat(1, 2), big.NewRat(1, 2)) + require.Error(t, err) + }) + t.Run("ok", func(t *testing.T) { + _, err := NewExecutionSegment(big.NewRat(0, 1), big.NewRat(1, 1)) + require.NoError(t, err) + }) +} + +func TestExecutionSegmentUnmarshalText(t *testing.T) { + t.Parallel() + testCases := []struct { + input string + output *ExecutionSegment + isErr bool + }{ + {input: "0:1", output: &ExecutionSegment{from: zeroRat, to: oneRat}}, + {input: "0.5:0.75", output: &ExecutionSegment{from: big.NewRat(1, 2), to: big.NewRat(3, 4)}}, + {input: "1/2:3/4", output: &ExecutionSegment{from: big.NewRat(1, 2), to: big.NewRat(3, 4)}}, + {input: "50%:75%", output: 
&ExecutionSegment{from: big.NewRat(1, 2), to: big.NewRat(3, 4)}}, + {input: "2/4:75%", output: &ExecutionSegment{from: big.NewRat(1, 2), to: big.NewRat(3, 4)}}, + {input: "75%", output: &ExecutionSegment{from: zeroRat, to: big.NewRat(3, 4)}}, + {input: "125%", isErr: true}, + {input: "1a5%", isErr: true}, + {input: "1a5", isErr: true}, + {input: "1a5%:2/3", isErr: true}, + {input: "125%:250%", isErr: true}, + {input: "55%:50%", isErr: true}, + // TODO add more strange or not so strange cases + } + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.input, func(t *testing.T) { + es := new(ExecutionSegment) + err := es.UnmarshalText([]byte(testCase.input)) + if testCase.isErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.True(t, es.Equal(testCase.output)) + + // see if unmarshalling a stringified segment gets you back the same segment + err = es.UnmarshalText([]byte(es.String())) + require.NoError(t, err) + require.True(t, es.Equal(testCase.output)) + }) + } + + t.Run("Unmarshal nilSegment.String", func(t *testing.T) { + var nilEs *ExecutionSegment + nilEsStr := nilEs.String() + require.Equal(t, "0:1", nilEsStr) + + es := new(ExecutionSegment) + err := es.UnmarshalText([]byte(nilEsStr)) + require.NoError(t, err) + require.True(t, es.Equal(nilEs)) + }) +} + +func TestExecutionSegmentSplit(t *testing.T) { + t.Parallel() + + var nilEs *ExecutionSegment + _, err := nilEs.Split(-1) + require.Error(t, err) + + _, err = nilEs.Split(0) + require.Error(t, err) + + segments, err := nilEs.Split(1) + require.NoError(t, err) + require.Len(t, segments, 1) + assert.Equal(t, "0:1", segments[0].String()) + + segments, err = nilEs.Split(2) + require.NoError(t, err) + require.Len(t, segments, 2) + assert.Equal(t, "0:1/2", segments[0].String()) + assert.Equal(t, "1/2:1", segments[1].String()) + + segments, err = nilEs.Split(3) + require.NoError(t, err) + require.Len(t, segments, 3) + assert.Equal(t, "0:1/3", segments[0].String()) + 
assert.Equal(t, "1/3:2/3", segments[1].String()) + assert.Equal(t, "2/3:1", segments[2].String()) + + secondQuarter, err := NewExecutionSegment(big.NewRat(1, 4), big.NewRat(2, 4)) + require.NoError(t, err) + + segments, err = secondQuarter.Split(1) + require.NoError(t, err) + require.Len(t, segments, 1) + assert.Equal(t, "1/4:1/2", segments[0].String()) + + segments, err = secondQuarter.Split(2) + require.NoError(t, err) + require.Len(t, segments, 2) + assert.Equal(t, "1/4:3/8", segments[0].String()) + assert.Equal(t, "3/8:1/2", segments[1].String()) + + segments, err = secondQuarter.Split(3) + require.NoError(t, err) + require.Len(t, segments, 3) + assert.Equal(t, "1/4:1/3", segments[0].String()) + assert.Equal(t, "1/3:5/12", segments[1].String()) + assert.Equal(t, "5/12:1/2", segments[2].String()) + + segments, err = secondQuarter.Split(4) + require.NoError(t, err) + require.Len(t, segments, 4) + assert.Equal(t, "1/4:5/16", segments[0].String()) + assert.Equal(t, "5/16:3/8", segments[1].String()) + assert.Equal(t, "3/8:7/16", segments[2].String()) + assert.Equal(t, "7/16:1/2", segments[3].String()) +} + +func TestExecutionSegmentFailures(t *testing.T) { + t.Parallel() + es := new(ExecutionSegment) + require.NoError(t, es.UnmarshalText([]byte("0:0.25"))) + require.Equal(t, int64(1), es.Scale(2)) + require.Equal(t, int64(1), es.Scale(3)) + + require.NoError(t, es.UnmarshalText([]byte("0.25:0.5"))) + require.Equal(t, int64(0), es.Scale(2)) + require.Equal(t, int64(1), es.Scale(3)) + + require.NoError(t, es.UnmarshalText([]byte("0.5:0.75"))) + require.Equal(t, int64(1), es.Scale(2)) + require.Equal(t, int64(0), es.Scale(3)) + + require.NoError(t, es.UnmarshalText([]byte("0.75:1"))) + require.Equal(t, int64(0), es.Scale(2)) + require.Equal(t, int64(1), es.Scale(3)) +} + +func TestExecutionTupleScale(t *testing.T) { + t.Parallel() + es := new(ExecutionSegment) + require.NoError(t, es.UnmarshalText([]byte("0.5"))) + et, err := NewExecutionTuple(es, nil) + 
require.NoError(t, err) + require.Equal(t, int64(1), et.ScaleInt64(2)) + require.Equal(t, int64(2), et.ScaleInt64(3)) + + require.NoError(t, es.UnmarshalText([]byte("0.5:1.0"))) + et, err = NewExecutionTuple(es, nil) + require.NoError(t, err) + require.Equal(t, int64(1), et.ScaleInt64(2)) + require.Equal(t, int64(1), et.ScaleInt64(3)) + + ess, err := NewExecutionSegmentSequenceFromString("0,0.5,1") + require.NoError(t, err) + require.NoError(t, es.UnmarshalText([]byte("0.5"))) + et, err = NewExecutionTuple(es, &ess) + require.NoError(t, err) + require.Equal(t, int64(1), et.ScaleInt64(2)) + require.Equal(t, int64(2), et.ScaleInt64(3)) + + require.NoError(t, es.UnmarshalText([]byte("0.5:1.0"))) + et, err = NewExecutionTuple(es, &ess) + require.NoError(t, err) + require.Equal(t, int64(1), et.ScaleInt64(2)) + require.Equal(t, int64(1), et.ScaleInt64(3)) +} + +func TestBigScale(t *testing.T) { + es := new(ExecutionSegment) + ess, err := NewExecutionSegmentSequenceFromString("0,7/20,7/10,1") + require.NoError(t, err) + require.NoError(t, es.UnmarshalText([]byte("0:7/20"))) + et, err := NewExecutionTuple(es, &ess) + require.NoError(t, err) + require.Equal(t, int64(18), et.ScaleInt64(50)) +} + +func TestExecutionSegmentCopyScaleRat(t *testing.T) { + t.Parallel() + es := new(ExecutionSegment) + twoRat := big.NewRat(2, 1) + threeRat := big.NewRat(3, 1) + require.NoError(t, es.UnmarshalText([]byte("0.5"))) + require.Equal(t, oneRat, es.CopyScaleRat(twoRat)) + require.Equal(t, big.NewRat(3, 2), es.CopyScaleRat(threeRat)) + + require.NoError(t, es.UnmarshalText([]byte("0.5:1.0"))) + require.Equal(t, oneRat, es.CopyScaleRat(twoRat)) + require.Equal(t, big.NewRat(3, 2), es.CopyScaleRat(threeRat)) + + var nilEs *ExecutionSegment + require.Equal(t, twoRat, nilEs.CopyScaleRat(twoRat)) + require.Equal(t, threeRat, nilEs.CopyScaleRat(threeRat)) +} + +func TestExecutionSegmentInPlaceScaleRat(t *testing.T) { + t.Parallel() + es := new(ExecutionSegment) + twoRat := big.NewRat(2, 1) + 
threeRat := big.NewRat(3, 1) + threeSecondsRat := big.NewRat(3, 2) + require.NoError(t, es.UnmarshalText([]byte("0.5"))) + require.Equal(t, oneRat, es.InPlaceScaleRat(twoRat)) + require.Equal(t, oneRat, twoRat) + require.Equal(t, threeSecondsRat, es.InPlaceScaleRat(threeRat)) + require.Equal(t, threeSecondsRat, threeRat) + + es = stringToES(t, "0.5:1.0") + twoRat = big.NewRat(2, 1) + threeRat = big.NewRat(3, 1) + require.Equal(t, oneRat, es.InPlaceScaleRat(twoRat)) + require.Equal(t, oneRat, twoRat) + require.Equal(t, threeSecondsRat, es.InPlaceScaleRat(threeRat)) + require.Equal(t, threeSecondsRat, threeRat) + + var nilEs *ExecutionSegment + twoRat = big.NewRat(2, 1) + threeRat = big.NewRat(3, 1) + require.Equal(t, big.NewRat(2, 1), nilEs.InPlaceScaleRat(twoRat)) + require.Equal(t, big.NewRat(2, 1), twoRat) + require.Equal(t, big.NewRat(3, 1), nilEs.InPlaceScaleRat(threeRat)) + require.Equal(t, big.NewRat(3, 1), threeRat) +} + +func TestExecutionSegmentSubSegment(t *testing.T) { + t.Parallel() + testCases := []struct { + name string + base, sub, result *ExecutionSegment + }{ + // TODO add more strange or not so strange cases + { + name: "nil base", + base: (*ExecutionSegment)(nil), + sub: stringToES(t, "0.2:0.3"), + result: stringToES(t, "0.2:0.3"), + }, + + { + name: "nil sub", + base: stringToES(t, "0.2:0.3"), + sub: (*ExecutionSegment)(nil), + result: stringToES(t, "0.2:0.3"), + }, + { + name: "doc example", + base: stringToES(t, "1/2:1"), + sub: stringToES(t, "0:1/2"), + result: stringToES(t, "1/2:3/4"), + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + require.Equal(t, testCase.result, testCase.base.SubSegment(testCase.sub)) + }) + } +} + +func TestSplitBadSegment(t *testing.T) { + t.Parallel() + es := &ExecutionSegment{from: oneRat, to: zeroRat} + _, err := es.Split(5) + require.Error(t, err) +} + +func TestSegmentExecutionFloatLength(t *testing.T) { + t.Parallel() + t.Run("nil has 1.0", 
func(t *testing.T) { + var nilEs *ExecutionSegment + require.Equal(t, 1.0, nilEs.FloatLength()) + }) + + testCases := []struct { + es *ExecutionSegment + expected float64 + }{ + // TODO add more strange or not so strange cases + { + es: stringToES(t, "1/2:1"), + expected: 0.5, + }, + { + es: stringToES(t, "1/3:1"), + expected: 0.66666, + }, + + { + es: stringToES(t, "0:1/2"), + expected: 0.5, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.es.String(), func(t *testing.T) { + require.InEpsilon(t, testCase.expected, testCase.es.FloatLength(), 0.001) + }) + } +} + +func TestExecutionSegmentSequences(t *testing.T) { + t.Parallel() + + _, err := NewExecutionSegmentSequence(stringToES(t, "0:1/3"), stringToES(t, "1/2:1")) + assert.Error(t, err) +} + +func TestExecutionSegmentStringSequences(t *testing.T) { + t.Parallel() + testCases := []struct { + seq string + expSegments []string + expError bool + canReverse bool + // TODO: checks for least common denominator and maybe striped partitioning + }{ + {seq: "", expSegments: nil}, + {seq: "0.5", expError: true}, + {seq: "1,1", expError: true}, + {seq: "-0.5,1", expError: true}, + {seq: "1/2,1/2", expError: true}, + {seq: "1/2,1/3", expError: true}, + {seq: "0,1,1/2", expError: true}, + {seq: "0.5,1", expSegments: []string{"1/2:1"}}, + {seq: "1/2,1", expSegments: []string{"1/2:1"}, canReverse: true}, + {seq: "1/3,2/3", expSegments: []string{"1/3:2/3"}, canReverse: true}, + {seq: "0,1/3,2/3", expSegments: []string{"0:1/3", "1/3:2/3"}, canReverse: true}, + {seq: "0,1/3,2/3,1", expSegments: []string{"0:1/3", "1/3:2/3", "2/3:1"}, canReverse: true}, + {seq: "0.5,0.7", expSegments: []string{"1/2:7/10"}}, + {seq: "0.5,0.7,1", expSegments: []string{"1/2:7/10", "7/10:1"}}, + {seq: "0,1/13,2/13,1/3,1/2,3/4,1", expSegments: []string{ + "0:1/13", "1/13:2/13", "2/13:1/3", "1/3:1/2", "1/2:3/4", "3/4:1", + }, canReverse: true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.seq, func(t 
*testing.T) { + result, err := NewExecutionSegmentSequenceFromString(tc.seq) + if tc.expError { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, len(tc.expSegments), len(result)) + for i, expStrSeg := range tc.expSegments { + expSeg, errl := NewExecutionSegmentFromString(expStrSeg) + require.NoError(t, errl) + assert.Truef(t, expSeg.Equal(result[i]), "Segment %d (%s) should be equal to %s", i, result[i], expSeg) + } + if tc.canReverse { + assert.Equal(t, result.String(), tc.seq) + } + }) + } +} + +// Return a randomly distributed sequence of n amount of +// execution segments whose length totals 1. +func generateRandomSequence(t testing.TB, n, m int64, r *rand.Rand) ExecutionSegmentSequence { + var err error + ess := ExecutionSegmentSequence(make([]*ExecutionSegment, n)) + numerators := make([]int64, n) + var denominator int64 + for i := int64(0); i < n; i++ { + numerators[i] = 1 + r.Int63n(m) + denominator += numerators[i] + } + from := big.NewRat(0, 1) + for i := int64(0); i < n; i++ { + to := new(big.Rat).Add(big.NewRat(numerators[i], denominator), from) + ess[i], err = NewExecutionSegment(from, to) + require.NoError(t, err) + from = to + } + + return ess +} + +// Ensure that the sum of scaling all execution segments in +// the same sequence with scaling factor M results in M itself. 
+func TestExecutionSegmentScaleConsistency(t *testing.T) { + t.Parallel() + + seed := time.Now().UnixNano() + r := rand.New(rand.NewSource(seed)) + t.Logf("Random source seeded with %d\n", seed) + + const numTests = 10 + for i := 0; i < numTests; i++ { + scale := rand.Int31n(99) + 2 + seq := generateRandomSequence(t, r.Int63n(9)+2, 100, r) + + t.Run(fmt.Sprintf("%d_%s", scale, seq), func(t *testing.T) { + var total int64 + for _, segment := range seq { + total += segment.Scale(int64(scale)) + } + assert.Equal(t, int64(scale), total) + }) + } +} + +// Ensure that the sum of scaling all execution segments in +// the same sequence with scaling factor M results in M itself. +func TestExecutionTupleScaleConsistency(t *testing.T) { + t.Parallel() + + seed := time.Now().UnixNano() + r := rand.New(rand.NewSource(seed)) + t.Logf("Random source seeded with %d\n", seed) + + const numTests = 10 + for i := 0; i < numTests; i++ { + scale := rand.Int31n(99) + 2 + seq := generateRandomSequence(t, r.Int63n(9)+2, 200, r) + + et, err := NewExecutionTuple(seq[0], &seq) + require.NoError(t, err) + t.Run(fmt.Sprintf("%d_%s", scale, seq), func(t *testing.T) { + var total int64 + for i, segment := range seq { + assert.True(t, segment.Equal(et.Sequence.ExecutionSegmentSequence[i])) + total += et.Sequence.ScaleInt64(i, int64(scale)) + } + assert.Equal(t, int64(scale), total) + }) + } +} + +func TestExecutionSegmentScaleNoWobble(t *testing.T) { + t.Parallel() + + requireSegmentScaleGreater := func(t *testing.T, et *ExecutionTuple) { + var i, lastResult int64 + for i = 1; i < 1000; i++ { + result := et.ScaleInt64(i) + require.True(t, result >= lastResult, "%d<%d", result, lastResult) + lastResult = result + } + } + + // Baseline full segment test + t.Run("0:1", func(t *testing.T) { + et, err := NewExecutionTuple(nil, nil) + require.NoError(t, err) + requireSegmentScaleGreater(t, et) + }) + + seed := time.Now().UnixNano() + r := rand.New(rand.NewSource(seed)) + t.Logf("Random source seeded 
with %d\n", seed) + + // Random segments + const numTests = 10 + for i := 0; i < numTests; i++ { + seq := generateRandomSequence(t, r.Int63n(9)+2, 100, r) + + es := seq[rand.Intn(len(seq))] + + et, err := NewExecutionTuple(seq[0], &seq) + require.NoError(t, err) + t.Run(es.String(), func(t *testing.T) { + requireSegmentScaleGreater(t, et) + }) + } +} + +func TestGetStripedOffsets(t *testing.T) { + t.Parallel() + testCases := []struct { + seq string + seg string + start int64 + offsets []int64 + lcd int64 + }{ + // full sequences + {seq: "0,0.3,0.5,0.6,0.7,0.8,0.9,1", seg: "0:0.3", start: 0, offsets: []int64{4, 3, 3}, lcd: 10}, + {seq: "0,0.3,0.5,0.6,0.7,0.8,0.9,1", seg: "0.3:0.5", start: 1, offsets: []int64{4, 6}, lcd: 10}, + {seq: "0,0.3,0.5,0.6,0.7,0.8,0.9,1", seg: "0.5:0.6", start: 2, offsets: []int64{10}, lcd: 10}, + {seq: "0,0.3,0.5,0.6,0.7,0.8,0.9,1", seg: "0.6:0.7", start: 3, offsets: []int64{10}, lcd: 10}, + {seq: "0,0.3,0.5,0.6,0.7,0.8,0.9,1", seg: "0.8:0.9", start: 8, offsets: []int64{10}, lcd: 10}, + {seq: "0,0.3,0.5,0.6,0.7,0.8,0.9,1", seg: "0.9:1", start: 9, offsets: []int64{10}, lcd: 10}, + {seq: "0,0.2,0.5,0.6,0.7,0.8,0.9,1", seg: "0.9:1", start: 9, offsets: []int64{10}, lcd: 10}, + {seq: "0,0.2,0.5,0.6,0.7,0.8,0.9,1", seg: "0:0.2", start: 1, offsets: []int64{4, 6}, lcd: 10}, + {seq: "0,0.2,0.5,0.6,0.7,0.8,0.9,1", seg: "0.6:0.7", start: 3, offsets: []int64{10}, lcd: 10}, + // not full sequences + {seq: "0,0.2,0.5", seg: "0:0.2", start: 3, offsets: []int64{6, 4}, lcd: 10}, + {seq: "0,0.2,0.5", seg: "0.2:0.5", start: 1, offsets: []int64{4, 2, 4}, lcd: 10}, + {seq: "0,2/5,4/5", seg: "0:2/5", start: 0, offsets: []int64{3, 2}, lcd: 5}, + {seq: "0,2/5,4/5", seg: "2/5:4/5", start: 1, offsets: []int64{3, 2}, lcd: 5}, + // no sequence + {seg: "0:0.2", start: 1, offsets: []int64{5}, lcd: 5}, + {seg: "0:1/5", start: 1, offsets: []int64{5}, lcd: 5}, + {seg: "0:2/10", start: 1, offsets: []int64{5}, lcd: 5}, + {seg: "0:0.4", start: 1, offsets: []int64{2, 3}, lcd: 
5}, + {seg: "0:2/5", start: 1, offsets: []int64{2, 3}, lcd: 5}, + {seg: "2/5:4/5", start: 1, offsets: []int64{3, 2}, lcd: 5}, + {seg: "0:4/10", start: 1, offsets: []int64{2, 3}, lcd: 5}, + {seg: "1/10:5/10", start: 1, offsets: []int64{2, 2, 4, 2}, lcd: 10}, + } + + for _, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("seq:%s;segment:%s", tc.seq, tc.seg), func(t *testing.T) { + ess, err := NewExecutionSegmentSequenceFromString(tc.seq) + require.NoError(t, err) + segment, err := NewExecutionSegmentFromString(tc.seg) + require.NoError(t, err) + et, err := NewExecutionTuple(segment, &ess) + require.NoError(t, err) + + start, offsets, lcd := et.GetStripedOffsets() + + assert.Equal(t, tc.start, start) + assert.Equal(t, tc.offsets, offsets) + assert.Equal(t, tc.lcd, lcd) + + ess2, err := NewExecutionSegmentSequenceFromString(tc.seq) + require.NoError(t, err) + assert.Equal(t, ess.String(), ess2.String()) + }) + } +} + +func TestSequenceLCD(t *testing.T) { + testCases := []struct { + seq string + lcd int64 + }{ + {seq: "0,0.3,0.5,0.6,0.7,0.8,0.9,1", lcd: 10}, + {seq: "0,0.1,0.5,0.6,0.7,0.8,0.9,1", lcd: 10}, + {seq: "0,0.2,0.5,0.6,0.7,0.8,0.9,1", lcd: 10}, + {seq: "0,1/3,5/6", lcd: 6}, + {seq: "0,1/3,4/7", lcd: 21}, + } + + for _, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("seq:%s", tc.seq), func(t *testing.T) { + ess, err := NewExecutionSegmentSequenceFromString(tc.seq) + require.NoError(t, err) + require.Equal(t, tc.lcd, ess.LCD()) + }) + } +} + +func BenchmarkGetStripedOffsets(b *testing.B) { + lengths := [...]int64{10, 100} + const seed = 777 + r := rand.New(rand.NewSource(seed)) + + for _, length := range lengths { + length := length + b.Run(fmt.Sprintf("length%d,seed%d", length, seed), func(b *testing.B) { + sequence := generateRandomSequence(b, length, 100, r) + b.ResetTimer() + for i := 0; i < b.N; i++ { + segment := sequence[int(r.Int63())%len(sequence)] + et, err := NewExecutionTuple(segment, &sequence) + require.NoError(b, err) + _, _, _ = 
et.GetStripedOffsets() + } + }) + } +} + +func BenchmarkGetStripedOffsetsEven(b *testing.B) { + lengths := [...]int64{10, 100, 1000} + generateSequence := func(n int64) ExecutionSegmentSequence { + var err error + ess := ExecutionSegmentSequence(make([]*ExecutionSegment, n)) + numerators := make([]int64, n) + var denominator int64 + for i := int64(0); i < n; i++ { + numerators[i] = 1 // nice and simple :) + denominator += numerators[i] + } + ess[0], err = NewExecutionSegment(big.NewRat(0, 1), big.NewRat(numerators[0], denominator)) + require.NoError(b, err) + for i := int64(1); i < n; i++ { + ess[i], err = NewExecutionSegment(ess[i-1].to, new(big.Rat).Add(big.NewRat(numerators[i], denominator), ess[i-1].to)) + require.NoError(b, err, "%d", i) + } + + return ess + } + + for _, length := range lengths { + length := length + b.Run(fmt.Sprintf("length%d", length), func(b *testing.B) { + sequence := generateSequence(length) + b.ResetTimer() + for i := 0; i < b.N; i++ { + segment := sequence[111233%len(sequence)] + et, err := NewExecutionTuple(segment, &sequence) + require.NoError(b, err) + _, _, _ = et.GetStripedOffsets() + } + }) + } +} + +func TestGetNewExecutionTupleBesedOnValue(t *testing.T) { + t.Parallel() + + testCases := []struct { + seq string + seg string + value int64 + expected string + }{ + // full sequences + {seq: "0,1/3,2/3,1", seg: "0:1/3", value: 20, expected: "0,7/20,7/10,1"}, + } + + for _, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("seq:%s;segment:%s", tc.seq, tc.seg), func(t *testing.T) { + ess, err := NewExecutionSegmentSequenceFromString(tc.seq) + require.NoError(t, err) + + segment, err := NewExecutionSegmentFromString(tc.seg) + require.NoError(t, err) + + et, err := NewExecutionTuple(segment, &ess) + require.NoError(t, err) + newET, err := et.GetNewExecutionTupleFromValue(tc.value) + require.NoError(t, err) + require.Equal(t, tc.expected, newET.Sequence.String()) + }) + } +} + +func mustNewExecutionSegment(str string) 
*ExecutionSegment { + res, err := NewExecutionSegmentFromString(str) + if err != nil { + panic(err) + } + return res +} + +func mustNewExecutionSegmentSequence(str string) *ExecutionSegmentSequence { + res, err := NewExecutionSegmentSequenceFromString(str) + if err != nil { + panic(err) + } + return &res +} + +func TestNewExecutionTuple(t *testing.T) { + testCases := []struct { + seg *ExecutionSegment + seq *ExecutionSegmentSequence + scaleTests map[int64]int64 + newScaleTests map[int64]map[int64]int64 // this is for after calling GetNewExecutionSegmentSequenceFromValue + }{ + { + // both segment and sequence are nil + scaleTests: map[int64]int64{ + 50: 50, + 1: 1, + 0: 0, + }, + newScaleTests: map[int64]map[int64]int64{ + 50: {50: 50, 1: 1, 0: 0}, + 1: {50: 50, 1: 1, 0: 0}, + 0: nil, + }, + }, + { + seg: mustNewExecutionSegment("0:1"), + // nil sequence + scaleTests: map[int64]int64{ + 50: 50, + 1: 1, + 0: 0, + }, + newScaleTests: map[int64]map[int64]int64{ + 50: {50: 50, 1: 1, 0: 0}, + 1: {50: 50, 1: 1, 0: 0}, + 0: nil, + }, + }, + { + seg: mustNewExecutionSegment("0:1"), + seq: mustNewExecutionSegmentSequence("0,1"), + scaleTests: map[int64]int64{ + 50: 50, + 1: 1, + 0: 0, + }, + newScaleTests: map[int64]map[int64]int64{ + 50: {50: 50, 1: 1, 0: 0}, + 1: {50: 50, 1: 1, 0: 0}, + 0: nil, + }, + }, + { + seg: mustNewExecutionSegment("0:1"), + seq: mustNewExecutionSegmentSequence(""), + scaleTests: map[int64]int64{ + 50: 50, + 1: 1, + 0: 0, + }, + newScaleTests: map[int64]map[int64]int64{ + 50: {50: 50, 1: 1, 0: 0}, + 1: {50: 50, 1: 1, 0: 0}, + 0: nil, + }, + }, + { + seg: mustNewExecutionSegment("0:1/3"), + seq: mustNewExecutionSegmentSequence("0,1/3,2/3,1"), + scaleTests: map[int64]int64{ + 50: 17, + 3: 1, + 2: 1, + 1: 1, + 0: 0, + }, + newScaleTests: map[int64]map[int64]int64{ + 50: {50: 17, 1: 1, 0: 0}, + 20: {50: 18, 1: 1, 0: 0}, + 3: {50: 17, 1: 1, 0: 0}, + 2: {50: 25, 1: 1, 0: 0}, + 1: {50: 50, 1: 1, 0: 0}, + 0: nil, + }, + }, + { + seg: 
mustNewExecutionSegment("1/3:2/3"), + seq: mustNewExecutionSegmentSequence("0,1/3,2/3,1"), + scaleTests: map[int64]int64{ + 50: 17, + 3: 1, + 2: 1, + 1: 0, + 0: 0, + }, + newScaleTests: map[int64]map[int64]int64{ + 50: {50: 17, 1: 0, 0: 0}, + 20: {50: 17, 1: 0, 0: 0}, + 3: {50: 17, 1: 0, 0: 0}, + 2: {50: 25, 1: 0, 0: 0}, + 1: nil, + 0: nil, + }, + }, + { + seg: mustNewExecutionSegment("2/3:1"), + seq: mustNewExecutionSegmentSequence("0,1/3,2/3,1"), + scaleTests: map[int64]int64{ + 50: 16, + 3: 1, + 2: 0, + 1: 0, + 0: 0, + }, + newScaleTests: map[int64]map[int64]int64{ + 50: {50: 16, 1: 0, 0: 0}, + 20: {50: 15, 1: 0, 0: 0}, + 3: {50: 16, 1: 0, 0: 0}, + 2: nil, + 1: nil, + 0: nil, + }, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(fmt.Sprintf("seg:'%s',seq:'%s'", testCase.seg, testCase.seq), func(t *testing.T) { + et, err := NewExecutionTuple(testCase.seg, testCase.seq) + require.NoError(t, err) + + for scaleValue, result := range testCase.scaleTests { + require.Equal(t, result, et.ScaleInt64(scaleValue), "%d->%d", scaleValue, result) + } + + for value, newResult := range testCase.newScaleTests { + newET, err := et.GetNewExecutionTupleFromValue(value) + if newResult == nil { + require.Error(t, err) + continue + } + require.NoError(t, err) + for scaleValue, result := range newResult { + require.Equal(t, result, newET.ScaleInt64(scaleValue), + "GetNewExecutionTupleFromValue(%d)%d->%d", value, scaleValue, result) + } + } + }) + } +} + +func BenchmarkExecutionSegmentScale(b *testing.B) { + testCases := []struct { + seq string + seg string + }{ + {}, + {seg: "0:1"}, + {seq: "0,0.3,0.5,0.6,0.7,0.8,0.9,1", seg: "0:0.3"}, + {seq: "0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1", seg: "0:0.1"}, + {seg: "2/5:4/5"}, + {seg: "2235/5213:4/5"}, // just wanted it to be ugly ;D + } + + for _, tc := range testCases { + tc := tc + b.Run(fmt.Sprintf("seq:%s;segment:%s", tc.seq, tc.seg), func(b *testing.B) { + ess, err := 
NewExecutionSegmentSequenceFromString(tc.seq) + require.NoError(b, err) + segment, err := NewExecutionSegmentFromString(tc.seg) + require.NoError(b, err) + if tc.seg == "" { + segment = nil // specifically for the optimization + } + et, err := NewExecutionTuple(segment, &ess) + require.NoError(b, err) + for _, value := range []int64{5, 5523, 5000000, 67280421310721} { + value := value + b.Run(fmt.Sprintf("segment.Scale(%d)", value), func(b *testing.B) { + for i := 0; i < b.N; i++ { + segment.Scale(value) + } + }) + + b.Run(fmt.Sprintf("et.Scale(%d)", value), func(b *testing.B) { + for i := 0; i < b.N; i++ { + et, err = NewExecutionTuple(segment, &ess) + require.NoError(b, err) + et.ScaleInt64(value) + } + }) + + et.ScaleInt64(1) // precache + b.Run(fmt.Sprintf("et.Scale(%d) prefilled", value), func(b *testing.B) { + for i := 0; i < b.N; i++ { + et.ScaleInt64(value) + } + }) + } + }) + } +} + +// TODO: test with randomized things diff --git a/lib/execution_status_gen.go b/lib/execution_status_gen.go new file mode 100644 index 00000000000..aafbcc6b303 --- /dev/null +++ b/lib/execution_status_gen.go @@ -0,0 +1,58 @@ +// Code generated by "enumer -type=ExecutionStatus -trimprefix ExecutionStatus -output execution_status_gen.go"; DO NOT EDIT. 
+ +// +package lib + +import ( + "fmt" +) + +const _ExecutionStatusName = "CreatedInitVUsInitExecutorsInitDonePausedBeforeRunStartedSetupRunningTeardownEnded" + +var _ExecutionStatusIndex = [...]uint8{0, 7, 14, 27, 35, 50, 57, 62, 69, 77, 82} + +func (i ExecutionStatus) String() string { + if i >= ExecutionStatus(len(_ExecutionStatusIndex)-1) { + return fmt.Sprintf("ExecutionStatus(%d)", i) + } + return _ExecutionStatusName[_ExecutionStatusIndex[i]:_ExecutionStatusIndex[i+1]] +} + +var _ExecutionStatusValues = []ExecutionStatus{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + +var _ExecutionStatusNameToValueMap = map[string]ExecutionStatus{ + _ExecutionStatusName[0:7]: 0, + _ExecutionStatusName[7:14]: 1, + _ExecutionStatusName[14:27]: 2, + _ExecutionStatusName[27:35]: 3, + _ExecutionStatusName[35:50]: 4, + _ExecutionStatusName[50:57]: 5, + _ExecutionStatusName[57:62]: 6, + _ExecutionStatusName[62:69]: 7, + _ExecutionStatusName[69:77]: 8, + _ExecutionStatusName[77:82]: 9, +} + +// ExecutionStatusString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func ExecutionStatusString(s string) (ExecutionStatus, error) { + if val, ok := _ExecutionStatusNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ExecutionStatus values", s) +} + +// ExecutionStatusValues returns all values of the enum +func ExecutionStatusValues() []ExecutionStatus { + return _ExecutionStatusValues +} + +// IsAExecutionStatus returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i ExecutionStatus) IsAExecutionStatus() bool { + for _, v := range _ExecutionStatusValues { + if i == v { + return true + } + } + return false +} diff --git a/lib/executor.go b/lib/executor.go deleted file mode 100644 index 668c0e18da3..00000000000 --- a/lib/executor.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2016 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * - */ - -package lib - -import ( - "context" - "time" - - "github.com/sirupsen/logrus" - null "gopkg.in/guregu/null.v3" - - "github.com/loadimpact/k6/lib/types" - "github.com/loadimpact/k6/stats" -) - -// An Executor is in charge of scheduling VUs created by a wrapped Runner, but decouples how you -// control a swarm of VUs from the details of how or even where they're scheduled. -// -// The core/local executor schedules VUs on the local machine, but the same interface may be -// implemented to control a test running on a cluster or in the cloud. -type Executor interface { - // Run the Executor, funneling generated samples through the out channel. - Run(ctx context.Context, engineOut chan<- stats.SampleContainer) error - // Is the executor currently running? - IsRunning() bool - - // Returns the wrapped runner. May return nil if not applicable, eg. if we're remote - // controlling a test running on another machine. 
- GetRunner() Runner - - // Get and set the logger. This is propagated to the Runner. - GetLogger() *logrus.Logger - SetLogger(l *logrus.Logger) - - // Get and set the list of stages. - GetStages() []Stage - SetStages(s []Stage) - - // Get iterations executed so far, get and set how many to end the test after. - GetIterations() int64 - GetEndIterations() null.Int - SetEndIterations(i null.Int) - - // Get time elapsed so far, accounting for pauses, get and set at what point to end the test. - GetTime() time.Duration - GetEndTime() types.NullDuration - SetEndTime(t types.NullDuration) - - // Check whether the test is paused, or pause it. A paused won't start any new iterations (but - // will allow currently in progress ones to finish), and will not increment the value returned - // by GetTime(). - IsPaused() bool - SetPaused(paused bool) - - // Get and set the number of currently active VUs. - // It is an error to try to set this higher than MaxVUs. - GetVUs() int64 - SetVUs(vus int64) error - - // Get and set the number of allocated, available VUs. - // Please note that initialising new VUs is a very expensive operation, and doing it during a - // running test may skew metrics; if you're not sure how many you will need, it's generally - // speaking better to preallocate too many than too few. - GetVUsMax() int64 - SetVUsMax(max int64) error - - // Set whether or not to run setup/teardown phases. Default is to run all of them. 
- SetRunSetup(r bool) - SetRunTeardown(r bool) -} diff --git a/lib/executor/base_config.go b/lib/executor/base_config.go new file mode 100644 index 00000000000..40bbe461ea9 --- /dev/null +++ b/lib/executor/base_config.go @@ -0,0 +1,158 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package executor + +import ( + "fmt" + "regexp" + "strings" + "time" + + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib/consts" + "github.com/loadimpact/k6/lib/types" +) + +// DefaultGracefulStopValue is the graceful top value for all executors, unless +// it's manually changed by the gracefulStop in each one. +// TODO?: Discard? Or make this actually user-configurable somehow? hello #883... 
+var DefaultGracefulStopValue = 30 * time.Second //nolint:gochecknoglobals + +var executorNameWhitelist = regexp.MustCompile(`^[0-9a-zA-Z_-]+$`) //nolint:gochecknoglobals +const executorNameErr = "the executor name should contain only numbers, latin letters, underscores, and dashes" + +// BaseConfig contains the common config fields for all executors +type BaseConfig struct { + Name string `json:"-"` // set via the JS object key + Type string `json:"executor"` + StartTime types.NullDuration `json:"startTime"` + GracefulStop types.NullDuration `json:"gracefulStop"` + Env map[string]string `json:"env"` + Exec null.String `json:"exec"` // function name, externally validated + Tags map[string]string `json:"tags"` + + // TODO: future extensions like distribution, others? +} + +// NewBaseConfig returns a default base config with the default values +func NewBaseConfig(name, configType string) BaseConfig { + return BaseConfig{ + Name: name, + Type: configType, + GracefulStop: types.NewNullDuration(DefaultGracefulStopValue, false), + } +} + +// Validate checks some basic things like present name, type, and a positive start time +func (bc BaseConfig) Validate() (errors []error) { + // Some just-in-case checks, since those things are likely checked in other places or + // even assigned by us: + if bc.Name == "" { + errors = append(errors, fmt.Errorf("executor name shouldn't be empty")) + } + if !executorNameWhitelist.MatchString(bc.Name) { + errors = append(errors, fmt.Errorf(executorNameErr)) + } + if bc.Exec.Valid && bc.Exec.String == "" { + errors = append(errors, fmt.Errorf("exec value cannot be empty")) + } + if bc.Type == "" { + errors = append(errors, fmt.Errorf("missing or empty type field")) + } + // The actually reasonable checks: + if bc.StartTime.Duration < 0 { + errors = append(errors, fmt.Errorf("the startTime can't be negative")) + } + if bc.GracefulStop.Duration < 0 { + errors = append(errors, fmt.Errorf("the gracefulStop timeout can't be negative")) + } + 
return errors +} + +// GetName returns the name of the executor. +func (bc BaseConfig) GetName() string { + return bc.Name +} + +// GetType returns the executor's type as a string ID. +func (bc BaseConfig) GetType() string { + return bc.Type +} + +// GetStartTime returns the starting time, relative to the beginning of the +// actual test, that this executor is supposed to execute. +func (bc BaseConfig) GetStartTime() time.Duration { + return time.Duration(bc.StartTime.Duration) +} + +// GetGracefulStop returns how long k6 is supposed to wait for any still +// running iterations to finish executing at the end of the normal executor +// duration, before it actually kills them. +// +// Of course, that doesn't count when the user manually interrupts the test, +// then iterations are immediately stopped. +func (bc BaseConfig) GetGracefulStop() time.Duration { + return time.Duration(bc.GracefulStop.Duration) +} + +// GetEnv returns any specific environment key=value pairs that +// are configured for the executor. +func (bc BaseConfig) GetEnv() map[string]string { + return bc.Env +} + +// GetExec returns the configured custom exec value, if any. +func (bc BaseConfig) GetExec() string { + exec := bc.Exec.ValueOrZero() + if exec == "" { + exec = consts.DefaultFn + } + return exec +} + +// GetTags returns any custom tags configured for the executor. +func (bc BaseConfig) GetTags() map[string]string { + return bc.Tags +} + +// IsDistributable returns true since by default all executors could be run in +// a distributed manner. +func (bc BaseConfig) IsDistributable() bool { + return true +} + +// getBaseInfo is a helper method for the "parent" String methods. 
+func (bc BaseConfig) getBaseInfo(facts ...string) string { + if bc.Exec.Valid { + facts = append(facts, fmt.Sprintf("exec: %s", bc.Exec.String)) + } + if bc.StartTime.Duration > 0 { + facts = append(facts, fmt.Sprintf("startTime: %s", bc.StartTime.Duration)) + } + if bc.GracefulStop.Duration > 0 { + facts = append(facts, fmt.Sprintf("gracefulStop: %s", bc.GracefulStop.Duration)) + } + if len(facts) == 0 { + return "" + } + return " (" + strings.Join(facts, ", ") + ")" +} diff --git a/lib/executor/base_executor.go b/lib/executor/base_executor.go new file mode 100644 index 00000000000..f43377983e6 --- /dev/null +++ b/lib/executor/base_executor.go @@ -0,0 +1,90 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package executor + +import ( + "context" + "strconv" + + "github.com/sirupsen/logrus" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/stats" + "github.com/loadimpact/k6/ui/pb" +) + +// BaseExecutor is a helper struct that contains common properties and methods +// between most executors. It is intended to be used as an anonymous struct +// inside of most of the executors, for the purpose of reducing boilerplate +// code. 
+type BaseExecutor struct { + config lib.ExecutorConfig + executionState *lib.ExecutionState + logger *logrus.Entry + progress *pb.ProgressBar +} + +// NewBaseExecutor returns an initialized BaseExecutor +func NewBaseExecutor(config lib.ExecutorConfig, es *lib.ExecutionState, logger *logrus.Entry) *BaseExecutor { + return &BaseExecutor{ + config: config, + executionState: es, + logger: logger, + progress: pb.New( + pb.WithLeft(config.GetName), + pb.WithLogger(logger), + ), + } +} + +// Init doesn't do anything for most executors, since initialization of all +// planned VUs is handled by the executor. +func (bs *BaseExecutor) Init(_ context.Context) error { + return nil +} + +// GetConfig returns the configuration with which this executor was launched. +func (bs BaseExecutor) GetConfig() lib.ExecutorConfig { + return bs.config +} + +// GetLogger returns the executor logger entry. +func (bs BaseExecutor) GetLogger() *logrus.Entry { + return bs.logger +} + +// GetProgress just returns the progressbar pointer. +func (bs BaseExecutor) GetProgress() *pb.ProgressBar { + return bs.progress +} + +// getMetricTags returns a tag set that can be used to emit metrics by the +// executor. The VU ID is optional. 
+func (bs BaseExecutor) getMetricTags(vuID *int64) *stats.SampleTags { + tags := bs.executionState.Options.RunTags.CloneTags() + if bs.executionState.Options.SystemTags.Has(stats.TagScenario) { + tags["scenario"] = bs.config.GetName() + } + if vuID != nil && bs.executionState.Options.SystemTags.Has(stats.TagVU) { + tags["vu"] = strconv.FormatInt(*vuID, 10) + } + return stats.IntoSampleTags(&tags) +} diff --git a/lib/executor/common_test.go b/lib/executor/common_test.go new file mode 100644 index 00000000000..280ac996e7e --- /dev/null +++ b/lib/executor/common_test.go @@ -0,0 +1,87 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import ( + "context" + "io/ioutil" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/testutils" + "github.com/loadimpact/k6/lib/testutils/minirunner" + "github.com/loadimpact/k6/stats" +) + +func simpleRunner(vuFn func(context.Context) error) lib.Runner { + return &minirunner.MiniRunner{ + Fn: func(ctx context.Context, _ chan<- stats.SampleContainer) error { + return vuFn(ctx) + }, + } +} + +func setupExecutor(t *testing.T, config lib.ExecutorConfig, es *lib.ExecutionState, runner lib.Runner) ( + context.Context, context.CancelFunc, lib.Executor, *testutils.SimpleLogrusHook, +) { + ctx, cancel := context.WithCancel(context.Background()) + engineOut := make(chan stats.SampleContainer, 100) // TODO: return this for more complicated tests? + + logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.WarnLevel}} + testLog := logrus.New() + testLog.AddHook(logHook) + testLog.SetOutput(ioutil.Discard) + logEntry := logrus.NewEntry(testLog) + + initVUFunc := func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { + return runner.NewVU(int64(es.GetUniqueVUIdentifier()), engineOut) + } + es.SetInitVUFunc(initVUFunc) + + et, err := lib.NewExecutionTuple(es.Options.ExecutionSegment, es.Options.ExecutionSegmentSequence) + require.NoError(t, err) + + maxPlannedVUs := lib.GetMaxPlannedVUs(config.GetExecutionRequirements(et)) + initializeVUs(ctx, t, logEntry, es, maxPlannedVUs, initVUFunc) + + executor, err := config.NewExecutor(es, logEntry) + require.NoError(t, err) + + err = executor.Init(ctx) + require.NoError(t, err) + return ctx, cancel, executor, logHook +} + +func initializeVUs( + ctx context.Context, t testing.TB, logEntry *logrus.Entry, es *lib.ExecutionState, number uint64, initVU lib.InitVUFunc, +) { + // This is not how the local ExecutionScheduler initializes VUs, but should do the same job + for i 
:= uint64(0); i < number; i++ { + // Not calling es.InitializeNewVU() here to avoid a double increment of initializedVUs, + // which is done in es.AddInitializedVU(). + vu, err := initVU(ctx, logEntry) + require.NoError(t, err) + es.AddInitializedVU(vu) + } +} diff --git a/lib/executor/constant_arrival_rate.go b/lib/executor/constant_arrival_rate.go new file mode 100644 index 00000000000..4c1a9e0d49f --- /dev/null +++ b/lib/executor/constant_arrival_rate.go @@ -0,0 +1,374 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import ( + "context" + "fmt" + "math" + "math/big" + "sync" + "sync/atomic" + "time" + + "github.com/sirupsen/logrus" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/metrics" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" + "github.com/loadimpact/k6/ui/pb" +) + +const constantArrivalRateType = "constant-arrival-rate" + +func init() { + lib.RegisterExecutorConfigType( + constantArrivalRateType, + func(name string, rawJSON []byte) (lib.ExecutorConfig, error) { + config := NewConstantArrivalRateConfig(name) + err := lib.StrictJSONUnmarshal(rawJSON, &config) + return config, err + }, + ) +} + +// ConstantArrivalRateConfig stores config for the constant arrival-rate executor +type ConstantArrivalRateConfig struct { + BaseConfig + Rate null.Int `json:"rate"` + TimeUnit types.NullDuration `json:"timeUnit"` + Duration types.NullDuration `json:"duration"` + + // Initialize `PreAllocatedVUs` number of VUs, and if more than that are needed, + // they will be dynamically allocated, until `MaxVUs` is reached, which is an + // absolutely hard limit on the number of VUs the executor will use + PreAllocatedVUs null.Int `json:"preAllocatedVUs"` + MaxVUs null.Int `json:"maxVUs"` +} + +// NewConstantArrivalRateConfig returns a ConstantArrivalRateConfig with default values +func NewConstantArrivalRateConfig(name string) *ConstantArrivalRateConfig { + return &ConstantArrivalRateConfig{ + BaseConfig: NewBaseConfig(name, constantArrivalRateType), + TimeUnit: types.NewNullDuration(1*time.Second, false), + } +} + +// Make sure we implement the lib.ExecutorConfig interface +var _ lib.ExecutorConfig = &ConstantArrivalRateConfig{} + +// GetPreAllocatedVUs is just a helper method that returns the scaled pre-allocated VUs. 
+func (carc ConstantArrivalRateConfig) GetPreAllocatedVUs(et *lib.ExecutionTuple) int64 { + return et.ScaleInt64(carc.PreAllocatedVUs.Int64) +} + +// GetMaxVUs is just a helper method that returns the scaled max VUs. +func (carc ConstantArrivalRateConfig) GetMaxVUs(et *lib.ExecutionTuple) int64 { + return et.ScaleInt64(carc.MaxVUs.Int64) +} + +// GetDescription returns a human-readable description of the executor options +func (carc ConstantArrivalRateConfig) GetDescription(et *lib.ExecutionTuple) string { + preAllocatedVUs, maxVUs := carc.GetPreAllocatedVUs(et), carc.GetMaxVUs(et) + maxVUsRange := fmt.Sprintf("maxVUs: %d", preAllocatedVUs) + if maxVUs > preAllocatedVUs { + maxVUsRange += fmt.Sprintf("-%d", maxVUs) + } + + timeUnit := time.Duration(carc.TimeUnit.Duration) + var arrRatePerSec float64 + if maxVUs != 0 { // TODO: do something better? + ratio := big.NewRat(maxVUs, carc.MaxVUs.Int64) + arrRate := big.NewRat(carc.Rate.Int64, int64(timeUnit)) + arrRate.Mul(arrRate, ratio) + arrRatePerSec, _ = getArrivalRatePerSec(arrRate).Float64() + } + + return fmt.Sprintf("%.2f iterations/s for %s%s", arrRatePerSec, carc.Duration.Duration, + carc.getBaseInfo(maxVUsRange)) +} + +// Validate makes sure all options are configured and valid +func (carc *ConstantArrivalRateConfig) Validate() []error { + errors := carc.BaseConfig.Validate() + if !carc.Rate.Valid { + errors = append(errors, fmt.Errorf("the iteration rate isn't specified")) + } else if carc.Rate.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the iteration rate should be more than 0")) + } + + if time.Duration(carc.TimeUnit.Duration) <= 0 { + errors = append(errors, fmt.Errorf("the timeUnit should be more than 0")) + } + + if !carc.Duration.Valid { + errors = append(errors, fmt.Errorf("the duration is unspecified")) + } else if time.Duration(carc.Duration.Duration) < minDuration { + errors = append(errors, fmt.Errorf( + "the duration should be at least %s, but is %s", minDuration, carc.Duration, + )) + } + 
+ if !carc.PreAllocatedVUs.Valid { + errors = append(errors, fmt.Errorf("the number of preAllocatedVUs isn't specified")) + } else if carc.PreAllocatedVUs.Int64 < 0 { + errors = append(errors, fmt.Errorf("the number of preAllocatedVUs shouldn't be negative")) + } + + if !carc.MaxVUs.Valid { + // TODO: don't change the config while validating + carc.MaxVUs.Int64 = carc.PreAllocatedVUs.Int64 + } else if carc.MaxVUs.Int64 < carc.PreAllocatedVUs.Int64 { + errors = append(errors, fmt.Errorf("maxVUs shouldn't be less than preAllocatedVUs")) + } + + return errors +} + +// GetExecutionRequirements returns the number of required VUs to run the +// executor for its whole duration (disregarding any startTime), including the +// maximum waiting time for any iterations to gracefully stop. This is used by +// the execution scheduler in its VU reservation calculations, so it knows how +// many VUs to pre-initialize. +func (carc ConstantArrivalRateConfig) GetExecutionRequirements(et *lib.ExecutionTuple) []lib.ExecutionStep { + return []lib.ExecutionStep{ + { + TimeOffset: 0, + PlannedVUs: uint64(et.ScaleInt64(carc.PreAllocatedVUs.Int64)), + MaxUnplannedVUs: uint64(et.ScaleInt64(carc.MaxVUs.Int64) - et.ScaleInt64(carc.PreAllocatedVUs.Int64)), + }, { + TimeOffset: time.Duration(carc.Duration.Duration + carc.GracefulStop.Duration), + PlannedVUs: 0, + MaxUnplannedVUs: 0, + }, + } +} + +// NewExecutor creates a new ConstantArrivalRate executor +func (carc ConstantArrivalRateConfig) NewExecutor( + es *lib.ExecutionState, logger *logrus.Entry, +) (lib.Executor, error) { + return &ConstantArrivalRate{ + BaseExecutor: NewBaseExecutor(&carc, es, logger), + config: carc, + }, nil +} + +// HasWork reports whether there is any work to be done for the given execution segment. +func (carc ConstantArrivalRateConfig) HasWork(et *lib.ExecutionTuple) bool { + return carc.GetMaxVUs(et) > 0 +} + +// ConstantArrivalRate tries to execute a specific number of iterations for a +// specific period. 
+type ConstantArrivalRate struct { + *BaseExecutor + config ConstantArrivalRateConfig + et *lib.ExecutionTuple +} + +// Make sure we implement the lib.Executor interface. +var _ lib.Executor = &ConstantArrivalRate{} + +// Init values needed for the execution +func (car *ConstantArrivalRate) Init(ctx context.Context) error { + // err should always be nil, because Init() won't be called for executors + // with no work, as determined by their config's HasWork() method. + et, err := car.BaseExecutor.executionState.ExecutionTuple.GetNewExecutionTupleFromValue(car.config.MaxVUs.Int64) + car.et = et + return err +} + +// Run executes a constant number of iterations per second. +// +// TODO: Split this up and make an independent component that can be reused +// between the constant and ramping arrival rate executors - that way we can +// keep the complexity in one well-architected part (with short methods and few +// lambdas :D), while having both config frontends still be present for maximum +// UX benefits. Basically, keep the progress bars and scheduling (i.e. at what +// time should iteration X begin) different, but keep everything else the same. +// This will allow us to implement https://github.com/loadimpact/k6/issues/1386 +// and things like all of the TODOs below in one place only. 
+//nolint:funlen +func (car ConstantArrivalRate) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) { + gracefulStop := car.config.GetGracefulStop() + duration := time.Duration(car.config.Duration.Duration) + preAllocatedVUs := car.config.GetPreAllocatedVUs(car.executionState.ExecutionTuple) + maxVUs := car.config.GetMaxVUs(car.executionState.ExecutionTuple) + // TODO: refactor and simplify + arrivalRate := getScaledArrivalRate(car.et.Segment, car.config.Rate.Int64, time.Duration(car.config.TimeUnit.Duration)) + tickerPeriod := time.Duration(getTickerPeriod(arrivalRate).Duration) + arrivalRatePerSec, _ := getArrivalRatePerSec(arrivalRate).Float64() + + // Make sure the log and the progress bar have accurate information + car.logger.WithFields(logrus.Fields{ + "maxVUs": maxVUs, "preAllocatedVUs": preAllocatedVUs, "duration": duration, + "tickerPeriod": tickerPeriod, "type": car.config.GetType(), + }).Debug("Starting executor run...") + + activeVUsWg := &sync.WaitGroup{} + + returnedVUs := make(chan struct{}) + startTime, maxDurationCtx, regDurationCtx, cancel := getDurationContexts(parentCtx, duration, gracefulStop) + + defer func() { + // Make sure all VUs aren't executing iterations anymore, for the cancel() + // below to deactivate them. 
+ <-returnedVUs + cancel() + activeVUsWg.Wait() + }() + activeVUs := make(chan lib.ActiveVU, maxVUs) + activeVUsCount := uint64(0) + + activationParams := getVUActivationParams(maxDurationCtx, car.config.BaseConfig, + func(u lib.InitializedVU) { + car.executionState.ReturnVU(u, true) + activeVUsWg.Done() + }) + activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { + activeVUsWg.Add(1) + activeVU := initVU.Activate(activationParams) + car.executionState.ModCurrentlyActiveVUsCount(+1) + atomic.AddUint64(&activeVUsCount, 1) + return activeVU + } + + remainingUnplannedVUs := maxVUs - preAllocatedVUs + makeUnplannedVUCh := make(chan struct{}) + defer close(makeUnplannedVUCh) + go func() { + defer close(returnedVUs) + defer func() { + // this is done here as to not have an unplannedVU in the middle of initialization when + // starting to return activeVUs + for i := uint64(0); i < atomic.LoadUint64(&activeVUsCount); i++ { + <-activeVUs + } + }() + for range makeUnplannedVUCh { + car.logger.Debug("Starting initialization of an unplanned VU...") + initVU, err := car.executionState.GetUnplannedVU(maxDurationCtx, car.logger) + if err != nil { + // TODO figure out how to return it to the Run goroutine + car.logger.WithError(err).Error("Error while allocating unplanned VU") + } else { + car.logger.Debug("The unplanned VU finished initializing successfully!") + activeVUs <- activateVU(initVU) + } + } + }() + + // Get the pre-allocated VUs in the local buffer + for i := int64(0); i < preAllocatedVUs; i++ { + initVU, err := car.executionState.GetPlannedVU(car.logger, false) + if err != nil { + return err + } + activeVUs <- activateVU(initVU) + } + + vusFmt := pb.GetFixedLengthIntFormat(maxVUs) + progIters := fmt.Sprintf( + pb.GetFixedLengthFloatFormat(arrivalRatePerSec, 0)+" iters/s", arrivalRatePerSec) + progressFn := func() (float64, []string) { + spent := time.Since(startTime) + currActiveVUs := atomic.LoadUint64(&activeVUsCount) + vusInBuffer := uint64(len(activeVUs)) + 
progVUs := fmt.Sprintf(vusFmt+"/"+vusFmt+" VUs", + currActiveVUs-vusInBuffer, currActiveVUs) + + right := []string{progVUs, duration.String(), progIters} + + if spent > duration { + return 1, right + } + + spentDuration := pb.GetFixedLengthDuration(spent, duration) + progDur := fmt.Sprintf("%s/%s", spentDuration, duration) + right[1] = progDur + + return math.Min(1, float64(spent)/float64(duration)), right + } + car.progress.Modify(pb.WithProgress(progressFn)) + go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, &car, progressFn) + + runIterationBasic := getIterationRunner(car.executionState, car.logger) + runIteration := func(vu lib.ActiveVU) { + runIterationBasic(maxDurationCtx, vu) + activeVUs <- vu + } + + start, offsets, _ := car.et.GetStripedOffsets() + timer := time.NewTimer(time.Hour * 24) + // here the we need the not scaled one + notScaledTickerPeriod := time.Duration( + getTickerPeriod( + big.NewRat( + car.config.Rate.Int64, + int64(time.Duration(car.config.TimeUnit.Duration)), + )).Duration) + + shownWarning := false + metricTags := car.getMetricTags(nil) + for li, gi := 0, start; ; li, gi = li+1, gi+offsets[li%len(offsets)] { + t := notScaledTickerPeriod*time.Duration(gi) - time.Since(startTime) + timer.Reset(t) + select { + case <-timer.C: + select { + case vu := <-activeVUs: // ideally, we get the VU from the buffer without any issues + go runIteration(vu) //TODO: refactor so we dont spin up a goroutine for each iteration + continue + default: // no free VUs currently available + } + + // Since there aren't any free VUs available, consider this iteration + // dropped - we aren't going to try to recover it, but + + stats.PushIfNotDone(parentCtx, out, stats.Sample{ + Value: 1, Metric: metrics.DroppedIterations, + Tags: metricTags, Time: time.Now(), + }) + + // We'll try to start allocating another VU in the background, + // non-blockingly, if we have remainingUnplannedVUs... 
+ if remainingUnplannedVUs == 0 { + if !shownWarning { + car.logger.Warningf("Insufficient VUs, reached %d active VUs and cannot initialize more", maxVUs) + shownWarning = true + } + continue + } + + select { + case makeUnplannedVUCh <- struct{}{}: // great! + remainingUnplannedVUs-- + default: // we're already allocating a new VU + } + + case <-regDurationCtx.Done(): + return nil + } + } +} diff --git a/lib/executor/constant_arrival_rate_test.go b/lib/executor/constant_arrival_rate_test.go new file mode 100644 index 00000000000..050d1b430d0 --- /dev/null +++ b/lib/executor/constant_arrival_rate_test.go @@ -0,0 +1,334 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/metrics" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" +) + +func newExecutionSegmentFromString(str string) *lib.ExecutionSegment { + r, err := lib.NewExecutionSegmentFromString(str) + if err != nil { + panic(err) + } + return r +} + +func newExecutionSegmentSequenceFromString(str string) *lib.ExecutionSegmentSequence { + r, err := lib.NewExecutionSegmentSequenceFromString(str) + if err != nil { + panic(err) + } + return &r +} + +func getTestConstantArrivalRateConfig() *ConstantArrivalRateConfig { + return &ConstantArrivalRateConfig{ + BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(1 * time.Second)}, + TimeUnit: types.NullDurationFrom(time.Second), + Rate: null.IntFrom(50), + Duration: types.NullDurationFrom(5 * time.Second), + PreAllocatedVUs: null.IntFrom(10), + MaxVUs: null.IntFrom(20), + } +} + +func TestConstantArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) { + t.Parallel() + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, logHook := setupExecutor( + t, getTestConstantArrivalRateConfig(), es, + simpleRunner(func(ctx context.Context) error { + time.Sleep(time.Second) + return nil + }), + ) + defer cancel() + engineOut := make(chan stats.SampleContainer, 1000) + err = executor.Run(ctx, engineOut) + require.NoError(t, err) + entries := logHook.Drain() + require.NotEmpty(t, entries) + for _, entry := range entries { + require.Equal(t, + "Insufficient VUs, reached 20 active VUs and cannot initialize more", + entry.Message) + require.Equal(t, logrus.WarnLevel, entry.Level) + } +} + +func 
TestConstantArrivalRateRunCorrectRate(t *testing.T) { + t.Parallel() + var count int64 + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, logHook := setupExecutor( + t, getTestConstantArrivalRateConfig(), es, + simpleRunner(func(ctx context.Context) error { + atomic.AddInt64(&count, 1) + return nil + }), + ) + defer cancel() + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + // check that we got around the amount of VU iterations as we would expect + var currentCount int64 + + for i := 0; i < 5; i++ { + time.Sleep(time.Second) + currentCount = atomic.SwapInt64(&count, 0) + require.InDelta(t, 50, currentCount, 1) + } + }() + engineOut := make(chan stats.SampleContainer, 1000) + err = executor.Run(ctx, engineOut) + wg.Wait() + require.NoError(t, err) + require.Empty(t, logHook.Drain()) +} + +func TestConstantArrivalRateRunCorrectTiming(t *testing.T) { + tests := []struct { + segment *lib.ExecutionSegment + sequence *lib.ExecutionSegmentSequence + start time.Duration + steps []int64 + }{ + { + segment: newExecutionSegmentFromString("0:1/3"), + start: time.Millisecond * 20, + steps: []int64{40, 60, 60, 60, 60, 60, 60}, + }, + { + segment: newExecutionSegmentFromString("1/3:2/3"), + start: time.Millisecond * 20, + steps: []int64{60, 60, 60, 60, 60, 60, 40}, + }, + { + segment: newExecutionSegmentFromString("2/3:1"), + start: time.Millisecond * 20, + steps: []int64{40, 60, 60, 60, 60, 60, 60}, + }, + { + segment: newExecutionSegmentFromString("1/6:3/6"), + start: time.Millisecond * 20, + steps: []int64{40, 80, 40, 80, 40, 80, 40}, + }, + { + segment: newExecutionSegmentFromString("1/6:3/6"), + sequence: newExecutionSegmentSequenceFromString("1/6,3/6"), + start: time.Millisecond * 20, + steps: []int64{40, 80, 40, 80, 40, 80, 40}, + }, + // sequences + { + segment: newExecutionSegmentFromString("0:1/3"), + sequence: 
newExecutionSegmentSequenceFromString("0,1/3,2/3,1"), + start: time.Millisecond * 00, + steps: []int64{60, 60, 60, 60, 60, 60, 40}, + }, + { + segment: newExecutionSegmentFromString("1/3:2/3"), + sequence: newExecutionSegmentSequenceFromString("0,1/3,2/3,1"), + start: time.Millisecond * 20, + steps: []int64{60, 60, 60, 60, 60, 60, 40}, + }, + { + segment: newExecutionSegmentFromString("2/3:1"), + sequence: newExecutionSegmentSequenceFromString("0,1/3,2/3,1"), + start: time.Millisecond * 40, + steps: []int64{60, 60, 60, 60, 60, 100}, + }, + } + for _, test := range tests { + test := test + + t.Run(fmt.Sprintf("segment %s sequence %s", test.segment, test.sequence), func(t *testing.T) { + t.Parallel() + et, err := lib.NewExecutionTuple(test.segment, test.sequence) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{ + ExecutionSegment: test.segment, + ExecutionSegmentSequence: test.sequence, + }, et, 10, 50) + var count int64 + config := getTestConstantArrivalRateConfig() + config.Duration.Duration = types.Duration(time.Second * 3) + newET, err := es.ExecutionTuple.GetNewExecutionTupleFromValue(config.MaxVUs.Int64) + require.NoError(t, err) + rateScaled := newET.ScaleInt64(config.Rate.Int64) + startTime := time.Now() + expectedTimeInt64 := int64(test.start) + ctx, cancel, executor, logHook := setupExecutor( + t, config, es, + simpleRunner(func(ctx context.Context) error { + current := atomic.AddInt64(&count, 1) + + expectedTime := test.start + if current != 1 { + expectedTime = time.Duration(atomic.AddInt64(&expectedTimeInt64, + int64(time.Millisecond)*test.steps[(current-2)%int64(len(test.steps))])) + } + assert.WithinDuration(t, + startTime.Add(expectedTime), + time.Now(), + time.Millisecond*10, + "%d expectedTime %s", current, expectedTime, + ) + + return nil + }), + ) + + defer cancel() + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + // check that we got around the amount of VU iterations as we would expect + var currentCount 
int64 + + for i := 0; i < 3; i++ { + time.Sleep(time.Second) + currentCount = atomic.LoadInt64(&count) + assert.InDelta(t, int64(i+1)*rateScaled, currentCount, 3) + } + }() + startTime = time.Now() + engineOut := make(chan stats.SampleContainer, 1000) + err = executor.Run(ctx, engineOut) + wg.Wait() + require.NoError(t, err) + require.Empty(t, logHook.Drain()) + }) + } +} + +func TestArrivalRateCancel(t *testing.T) { + t.Parallel() + + testCases := map[string]lib.ExecutorConfig{ + "constant": getTestConstantArrivalRateConfig(), + "ramping": getTestRampingArrivalRateConfig(), + } + for name, config := range testCases { + config := config + t.Run(name, func(t *testing.T) { + t.Parallel() + ch := make(chan struct{}) + errCh := make(chan error, 1) + weAreDoneCh := make(chan struct{}) + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, logHook := setupExecutor( + t, config, es, simpleRunner(func(ctx context.Context) error { + select { + case <-ch: + <-ch + default: + } + return nil + })) + defer cancel() + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + + engineOut := make(chan stats.SampleContainer, 1000) + errCh <- executor.Run(ctx, engineOut) + close(weAreDoneCh) + }() + + time.Sleep(time.Second) + ch <- struct{}{} + cancel() + time.Sleep(time.Second) + select { + case <-weAreDoneCh: + t.Fatal("Run returned before all VU iterations were finished") + default: + } + close(ch) + <-weAreDoneCh + wg.Wait() + require.NoError(t, <-errCh) + require.Empty(t, logHook.Drain()) + }) + } +} + +func TestConstantArrivalRateDroppedIterations(t *testing.T) { + t.Parallel() + var count int64 + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + + config := &ConstantArrivalRateConfig{ + BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(0 * time.Second)}, + TimeUnit: types.NullDurationFrom(time.Second), + Rate: null.IntFrom(20), + 
Duration: types.NullDurationFrom(950 * time.Millisecond), + PreAllocatedVUs: null.IntFrom(10), + MaxVUs: null.IntFrom(10), + } + + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, logHook := setupExecutor( + t, config, es, + simpleRunner(func(ctx context.Context) error { + atomic.AddInt64(&count, 1) + <-ctx.Done() + return nil + }), + ) + defer cancel() + engineOut := make(chan stats.SampleContainer, 1000) + err = executor.Run(ctx, engineOut) + require.NoError(t, err) + logs := logHook.Drain() + require.Len(t, logs, 1) + assert.Contains(t, logs[0].Message, "cannot initialize more") + assert.Equal(t, int64(10), count) + assert.Equal(t, float64(10), sumMetricValues(engineOut, metrics.DroppedIterations.Name)) +} diff --git a/lib/executor/constant_vus.go b/lib/executor/constant_vus.go new file mode 100644 index 00000000000..11a1c7a5685 --- /dev/null +++ b/lib/executor/constant_vus.go @@ -0,0 +1,215 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/sirupsen/logrus" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" + "github.com/loadimpact/k6/ui/pb" +) + +const constantVUsType = "constant-vus" + +func init() { + lib.RegisterExecutorConfigType( + constantVUsType, + func(name string, rawJSON []byte) (lib.ExecutorConfig, error) { + config := NewConstantVUsConfig(name) + err := lib.StrictJSONUnmarshal(rawJSON, &config) + return config, err + }, + ) +} + +// The minimum duration we'll allow users to schedule. This doesn't affect the stages +// configuration, where 0-duration virtual stages are allowed for instantaneous VU jumps +const minDuration = 1 * time.Second + +// ConstantVUsConfig stores VUs and duration +type ConstantVUsConfig struct { + BaseConfig + VUs null.Int `json:"vus"` + Duration types.NullDuration `json:"duration"` +} + +// NewConstantVUsConfig returns a ConstantVUsConfig with default values +func NewConstantVUsConfig(name string) ConstantVUsConfig { + return ConstantVUsConfig{ + BaseConfig: NewBaseConfig(name, constantVUsType), + VUs: null.NewInt(1, false), + } +} + +// Make sure we implement the lib.ExecutorConfig interface +var _ lib.ExecutorConfig = &ConstantVUsConfig{} + +// GetVUs returns the scaled VUs for the executor. 
+func (clvc ConstantVUsConfig) GetVUs(et *lib.ExecutionTuple) int64 { + return et.Segment.Scale(clvc.VUs.Int64) +} + +// GetDescription returns a human-readable description of the executor options +func (clvc ConstantVUsConfig) GetDescription(et *lib.ExecutionTuple) string { + return fmt.Sprintf("%d looping VUs for %s%s", + clvc.GetVUs(et), clvc.Duration.Duration, clvc.getBaseInfo()) +} + +// Validate makes sure all options are configured and valid +func (clvc ConstantVUsConfig) Validate() []error { + errors := clvc.BaseConfig.Validate() + if clvc.VUs.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) + } + + if !clvc.Duration.Valid { + errors = append(errors, fmt.Errorf("the duration is unspecified")) + } else if time.Duration(clvc.Duration.Duration) < minDuration { + errors = append(errors, fmt.Errorf( + "the duration should be at least %s, but is %s", minDuration, clvc.Duration, + )) + } + + return errors +} + +// GetExecutionRequirements returns the number of required VUs to run the +// executor for its whole duration (disregarding any startTime), including the +// maximum waiting time for any iterations to gracefully stop. This is used by +// the execution scheduler in its VU reservation calculations, so it knows how +// many VUs to pre-initialize. +func (clvc ConstantVUsConfig) GetExecutionRequirements(et *lib.ExecutionTuple) []lib.ExecutionStep { + return []lib.ExecutionStep{ + { + TimeOffset: 0, + PlannedVUs: uint64(clvc.GetVUs(et)), + }, + { + TimeOffset: time.Duration(clvc.Duration.Duration + clvc.GracefulStop.Duration), + PlannedVUs: 0, + }, + } +} + +// HasWork reports whether there is any work to be done for the given execution segment. 
+func (clvc ConstantVUsConfig) HasWork(et *lib.ExecutionTuple) bool { + return clvc.GetVUs(et) > 0 +} + +// NewExecutor creates a new ConstantVUs executor +func (clvc ConstantVUsConfig) NewExecutor(es *lib.ExecutionState, logger *logrus.Entry) (lib.Executor, error) { + return ConstantVUs{ + BaseExecutor: NewBaseExecutor(clvc, es, logger), + config: clvc, + }, nil +} + +// ConstantVUs maintains a constant number of VUs running for the +// specified duration. +type ConstantVUs struct { + *BaseExecutor + config ConstantVUsConfig +} + +// Make sure we implement the lib.Executor interface. +var _ lib.Executor = &ConstantVUs{} + +// Run constantly loops through as many iterations as possible on a fixed number +// of VUs for the specified duration. +func (clv ConstantVUs) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) { + numVUs := clv.config.GetVUs(clv.executionState.ExecutionTuple) + duration := time.Duration(clv.config.Duration.Duration) + gracefulStop := clv.config.GetGracefulStop() + + startTime, maxDurationCtx, regDurationCtx, cancel := getDurationContexts(parentCtx, duration, gracefulStop) + defer cancel() + + // Make sure the log and the progress bar have accurate information + clv.logger.WithFields( + logrus.Fields{"vus": numVUs, "duration": duration, "type": clv.config.GetType()}, + ).Debug("Starting executor run...") + + progressFn := func() (float64, []string) { + spent := time.Since(startTime) + right := []string{fmt.Sprintf("%d VUs", numVUs)} + if spent > duration { + right = append(right, duration.String()) + return 1, right + } + right = append(right, fmt.Sprintf("%s/%s", + pb.GetFixedLengthDuration(spent, duration), duration)) + return float64(spent) / float64(duration), right + } + clv.progress.Modify(pb.WithProgress(progressFn)) + go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, clv, progressFn) + + // Actually schedule the VUs and iterations... 
+ activeVUs := &sync.WaitGroup{} + defer activeVUs.Wait() + + regDurationDone := regDurationCtx.Done() + runIteration := getIterationRunner(clv.executionState, clv.logger) + + activationParams := getVUActivationParams(maxDurationCtx, clv.config.BaseConfig, + func(u lib.InitializedVU) { + clv.executionState.ReturnVU(u, true) + activeVUs.Done() + }) + handleVU := func(initVU lib.InitializedVU) { + ctx, cancel := context.WithCancel(maxDurationCtx) + defer cancel() + + newParams := *activationParams + newParams.RunContext = ctx + + activeVU := initVU.Activate(&newParams) + + for { + select { + case <-regDurationDone: + return // don't make more iterations + default: + // continue looping + } + runIteration(maxDurationCtx, activeVU) + } + } + + for i := int64(0); i < numVUs; i++ { + initVU, err := clv.executionState.GetPlannedVU(clv.logger, true) + if err != nil { + cancel() + return err + } + activeVUs.Add(1) + go handleVU(initVU) + } + + return nil +} diff --git a/lib/executor/constant_vus_test.go b/lib/executor/constant_vus_test.go new file mode 100644 index 00000000000..7e91c36c2a8 --- /dev/null +++ b/lib/executor/constant_vus_test.go @@ -0,0 +1,78 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/types" +) + +func getTestConstantVUsConfig() ConstantVUsConfig { + return ConstantVUsConfig{ + BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(1 * time.Second)}, + VUs: null.IntFrom(10), + Duration: types.NullDurationFrom(1 * time.Second), + } +} + +func TestConstantVUsRun(t *testing.T) { + t.Parallel() + var result sync.Map + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + var ctx, cancel, executor, _ = setupExecutor( + t, getTestConstantVUsConfig(), es, + simpleRunner(func(ctx context.Context) error { + time.Sleep(200 * time.Millisecond) + select { + case <-ctx.Done(): + return nil + default: + } + state := lib.GetState(ctx) + currIter, _ := result.LoadOrStore(state.Vu, uint64(0)) + result.Store(state.Vu, currIter.(uint64)+1) + return nil + }), + ) + defer cancel() + err = executor.Run(ctx, nil) + require.NoError(t, err) + + var totalIters uint64 + result.Range(func(key, value interface{}) bool { + vuIters := value.(uint64) + assert.Equal(t, uint64(5), vuIters) + totalIters += vuIters + return true + }) + assert.Equal(t, uint64(50), totalIters) +} diff --git a/lib/executor/execution_config_shortcuts.go b/lib/executor/execution_config_shortcuts.go new file mode 100644 index 00000000000..418a0ec160a --- /dev/null +++ b/lib/executor/execution_config_shortcuts.go @@ -0,0 +1,148 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later 
version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package executor + +import ( + "github.com/sirupsen/logrus" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/types" +) + +// ExecutionConflictError is a custom error type used for all of the errors in +// the DeriveScenariosFromShortcuts() function. +type ExecutionConflictError string + +func (e ExecutionConflictError) Error() string { + return string(e) +} + +var _ error = ExecutionConflictError("") + +func getConstantVUsScenario(duration types.NullDuration, vus null.Int) lib.ScenarioConfigs { + ds := NewConstantVUsConfig(lib.DefaultScenarioName) + ds.VUs = vus + ds.Duration = duration + return lib.ScenarioConfigs{lib.DefaultScenarioName: ds} +} + +func getRampingVUsScenario(stages []lib.Stage, startVUs null.Int) lib.ScenarioConfigs { + ds := NewRampingVUsConfig(lib.DefaultScenarioName) + ds.StartVUs = startVUs + for _, s := range stages { + if s.Duration.Valid { + ds.Stages = append(ds.Stages, Stage{Duration: s.Duration, Target: s.Target}) + } + } + return lib.ScenarioConfigs{lib.DefaultScenarioName: ds} +} + +func getSharedIterationsScenario(iters null.Int, duration types.NullDuration, vus null.Int) lib.ScenarioConfigs { + ds := NewSharedIterationsConfig(lib.DefaultScenarioName) + ds.VUs = vus + ds.Iterations = iters + if duration.Valid { + ds.MaxDuration = duration + } + return lib.ScenarioConfigs{lib.DefaultScenarioName: ds} +} + +// DeriveScenariosFromShortcuts checks for conflicting options and turns any +// shortcut options (i.e. 
duration, iterations, stages) into the proper +// long-form scenario/executor configuration in the scenarios property. +func DeriveScenariosFromShortcuts(opts lib.Options) (lib.Options, error) { + result := opts + + switch { + case opts.Iterations.Valid: + if len(opts.Stages) > 0 { // stages isn't nil (not set) and isn't explicitly set to empty + return result, ExecutionConflictError( + "using multiple execution config shortcuts (`iterations` and `stages`) simultaneously is not allowed", + ) + } + if opts.Scenarios != nil { + return opts, ExecutionConflictError( + "using an execution configuration shortcut (`iterations`) and `scenarios` simultaneously is not allowed", + ) + } + result.Scenarios = getSharedIterationsScenario(opts.Iterations, opts.Duration, opts.VUs) + + case opts.Duration.Valid: + if len(opts.Stages) > 0 { // stages isn't nil (not set) and isn't explicitly set to empty + return result, ExecutionConflictError( + "using multiple execution config shortcuts (`duration` and `stages`) simultaneously is not allowed", + ) + } + if opts.Scenarios != nil { + return result, ExecutionConflictError( + "using an execution configuration shortcut (`duration`) and `scenarios` simultaneously is not allowed", + ) + } + if opts.Duration.Duration <= 0 { + //TODO: move this validation to Validate()? 
+ return result, ExecutionConflictError( + "`duration` should be more than 0, for infinite duration use the externally-controlled executor", + ) + } + result.Scenarios = getConstantVUsScenario(opts.Duration, opts.VUs) + + case len(opts.Stages) > 0: // stages isn't nil (not set) and isn't explicitly set to empty + if opts.Scenarios != nil { + return opts, ExecutionConflictError( + "using an execution configuration shortcut (`stages`) and `scenarios` simultaneously is not allowed", + ) + } + result.Scenarios = getRampingVUsScenario(opts.Stages, opts.VUs) + + case len(opts.Scenarios) > 0: + // Do nothing, scenarios was explicitly specified + + default: + // Check if we should emit some warnings + if opts.VUs.Valid && opts.VUs.Int64 != 1 { + logrus.Warnf( + "the `vus=%d` option will be ignored, it only works in conjunction with `iterations`, `duration`, or `stages`", + opts.VUs.Int64, + ) + } + if opts.Stages != nil && len(opts.Stages) == 0 { + // No someone explicitly set stages to empty + logrus.Warnf("`stages` was explicitly set to an empty value, running the script with 1 iteration in 1 VU") + } + if opts.Scenarios != nil && len(opts.Scenarios) == 0 { + // No shortcut, and someone explicitly set execution to empty + logrus.Warnf("`scenarios` was explicitly set to an empty value, running the script with 1 iteration in 1 VU") + } + // No execution parameters whatsoever were specified, so we'll create a per-VU iterations config + // with 1 VU and 1 iteration. + result.Scenarios = lib.ScenarioConfigs{ + lib.DefaultScenarioName: NewPerVUIterationsConfig(lib.DefaultScenarioName), + } + } + + //TODO: validate the config; questions: + // - separately validate the duration, iterations and stages for better error messages? + // - or reuse the execution validation somehow, at the end? or something mixed? + // - here or in getConsolidatedConfig() or somewhere else? 
+ + return result, nil +} diff --git a/lib/executor/execution_test.go b/lib/executor/execution_test.go new file mode 100644 index 00000000000..39470cd36ad --- /dev/null +++ b/lib/executor/execution_test.go @@ -0,0 +1,173 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package executor + +import ( + "context" + "io/ioutil" + "math/rand" + "sync" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/testutils" + "github.com/loadimpact/k6/lib/testutils/minirunner" +) + +func TestExecutionStateVUIDs(t *testing.T) { + t.Parallel() + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 0, 0) + assert.Equal(t, uint64(1), es.GetUniqueVUIdentifier()) + assert.Equal(t, uint64(2), es.GetUniqueVUIdentifier()) + assert.Equal(t, uint64(3), es.GetUniqueVUIdentifier()) + wg := sync.WaitGroup{} + rand.Seed(time.Now().UnixNano()) + count := 100 + rand.Intn(50) + wg.Add(count) + for i := 0; i < count; i++ { + go func() { + es.GetUniqueVUIdentifier() + wg.Done() + }() + } + wg.Wait() + assert.Equal(t, uint64(4+count), es.GetUniqueVUIdentifier()) +} + +func 
// TestExecutionStateGettingVUsWhenNonAreAvailable checks that GetPlannedVU
// fails (after lib.MaxRetriesGetPlannedVU warning retries) when the VU buffer
// is empty.
func TestExecutionStateGettingVUsWhenNonAreAvailable(t *testing.T) {
    t.Parallel()
    et, err := lib.NewExecutionTuple(nil, nil)
    require.NoError(t, err)
    // Zero-sized VU buffer, so a planned VU can never be obtained.
    es := lib.NewExecutionState(lib.Options{}, et, 0, 0)
    logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.WarnLevel}}
    testLog := logrus.New()
    testLog.AddHook(logHook)
    testLog.SetOutput(ioutil.Discard)
    vu, err := es.GetPlannedVU(logrus.NewEntry(testLog), true)
    require.Nil(t, vu)
    require.Error(t, err)
    require.Contains(t, err.Error(), "could not get a VU from the buffer in")
    entries := logHook.Drain()
    // One warning per retry attempt.
    require.Equal(t, lib.MaxRetriesGetPlannedVU, len(entries))
    for _, entry := range entries {
        require.Contains(t, entry.Message, "Could not get a VU from the buffer for ")
    }
}

// TestExecutionStateGettingVUs walks through the VU lifecycle on an execution
// state with 10 planned and 20 max VUs: init+return 10 VUs, activate all 10
// planned ones, exhaust the planned buffer, then get 10 unplanned VUs and
// exhaust those too. The assertion order matters — each step checks the
// counters left by the previous one.
func TestExecutionStateGettingVUs(t *testing.T) {
    t.Parallel()
    logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.WarnLevel, logrus.DebugLevel}}
    testLog := logrus.New()
    testLog.AddHook(logHook)
    testLog.SetOutput(ioutil.Discard)
    logEntry := logrus.NewEntry(testLog)

    et, err := lib.NewExecutionTuple(nil, nil)
    require.NoError(t, err)
    es := lib.NewExecutionState(lib.Options{}, et, 10, 20)
    es.SetInitVUFunc(func(_ context.Context, _ *logrus.Entry) (lib.InitializedVU, error) {
        return &minirunner.VU{}, nil
    })

    // Initialize 10 VUs and return each to the buffer right away.
    var vu lib.InitializedVU
    for i := 0; i < 10; i++ {
        require.EqualValues(t, i, es.GetInitializedVUsCount())
        vu, err = es.InitializeNewVU(context.Background(), logEntry)
        require.NoError(t, err)
        require.EqualValues(t, i+1, es.GetInitializedVUsCount())
        es.ReturnVU(vu, false)
        require.EqualValues(t, 0, es.GetCurrentlyActiveVUsCount())
        require.EqualValues(t, i+1, es.GetInitializedVUsCount())
    }

    // Test getting initialized VUs is okay :)
    for i := 0; i < 10; i++ {
        require.EqualValues(t, i, es.GetCurrentlyActiveVUsCount())
        vu, err = es.GetPlannedVU(logEntry, true)
        require.NoError(t, err)
        require.Empty(t, logHook.Drain())
        require.NotNil(t, vu)
        require.EqualValues(t, i+1, es.GetCurrentlyActiveVUsCount())
        require.EqualValues(t, 10, es.GetInitializedVUsCount())
    }

    // Check that getting 1 more planned VU will error out
    vu, err = es.GetPlannedVU(logEntry, true)
    require.Nil(t, vu)
    require.Error(t, err)
    require.Contains(t, err.Error(), "could not get a VU from the buffer in")
    entries := logHook.Drain()
    require.Equal(t, lib.MaxRetriesGetPlannedVU, len(entries))
    for _, entry := range entries {
        require.Contains(t, entry.Message, "Could not get a VU from the buffer for ")
    }

    // Test getting uninitialized vus will work
    for i := 0; i < 10; i++ {
        require.EqualValues(t, 10+i, es.GetInitializedVUsCount())
        vu, err = es.GetUnplannedVU(context.Background(), logEntry)
        require.NoError(t, err)
        require.Empty(t, logHook.Drain())
        require.NotNil(t, vu)
        require.EqualValues(t, 10+i+1, es.GetInitializedVUsCount())
        // Unplanned VUs are initialized but not activated here, so the
        // active count stays at 10.
        require.EqualValues(t, 10, es.GetCurrentlyActiveVUsCount())
    }

    // Check that getting 1 more unplanned VU will error out
    vu, err = es.GetUnplannedVU(context.Background(), logEntry)
    require.Nil(t, vu)
    require.Error(t, err)
    require.Contains(t, err.Error(), "could not get a VU from the buffer in")
    entries = logHook.Drain()
    require.Equal(t, lib.MaxRetriesGetPlannedVU, len(entries))
    for _, entry := range entries {
        require.Contains(t, entry.Message, "Could not get a VU from the buffer for ")
    }
}

// TestMarkStartedPanicsOnSecondRun checks that MarkStarted is single-shot.
func TestMarkStartedPanicsOnSecondRun(t *testing.T) {
    t.Parallel()
    et, err := lib.NewExecutionTuple(nil, nil)
    require.NoError(t, err)
    es := lib.NewExecutionState(lib.Options{}, et, 0, 0)
    require.False(t, es.HasStarted())
    es.MarkStarted()
    require.True(t, es.HasStarted())
    require.Panics(t, es.MarkStarted)
}

// TestMarkEnded checks that MarkEnded is likewise single-shot.
func TestMarkEnded(t *testing.T) {
    t.Parallel()
    et, err := lib.NewExecutionTuple(nil, nil)
    require.NoError(t, err)
    es := lib.NewExecutionState(lib.Options{}, et, 0, 0)
    require.False(t, es.HasEnded())
    es.MarkEnded()
    require.True(t, es.HasEnded())
    require.Panics(t, es.MarkEnded)
}
/*
 *
 * k6 - a next-generation load testing tool
 * Copyright (C) 2019 Load Impact
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 *
 */

package executor

import (
    "encoding/json"
    "fmt"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "gopkg.in/guregu/null.v3"

    "github.com/loadimpact/k6/lib"
    "github.com/loadimpact/k6/lib/types"
)

// exp describes the expected outcome of parsing+validating one raw JSON
// scenario config: a JSON parse error, a validation error, or a custom
// checker for the successfully parsed result.
type exp struct {
    parseError      bool
    validationError bool
    custom          func(t *testing.T, cm lib.ScenarioConfigs)
}

// configMapTestCase pairs a raw JSON `scenarios` value with its expectation.
type configMapTestCase struct {
    rawJSON  string
    expected exp
}

//nolint:gochecknoglobals
var configMapTestCases = []configMapTestCase{
    {"", exp{parseError: true}},
    {"1234", exp{parseError: true}},
    {"asdf", exp{parseError: true}},
    {"'adsf'", exp{parseError: true}},
    {"[]", exp{parseError: true}},
    {"{}", exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
        assert.Equal(t, cm, lib.ScenarioConfigs{})
    }}},
    {"{}asdf", exp{parseError: true}},
    {"null", exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
        assert.Nil(t, cm)
    }}},
    {`{"someKey": {}}`, exp{parseError: true}},
    {`{"someKey": {"executor": "constant-blah-blah", "vus": 10, "duration": "60s"}}`, exp{parseError: true}},
    {`{"someKey": {"executor": "constant-vus", "uknownField": "should_error"}}`, exp{parseError: true}},
    {`{"someKey": {"executor": "constant-vus", "vus": 10, "duration": "60s", "env": 123}}`, exp{parseError: true}},

    // Validation errors for constant-vus and the base config
    {`{"someKey": {"executor": "constant-vus", "vus": 10, "duration": "60s",
        "gracefulStop": "10s", "startTime": "70s", "env": {"test": "mest"}, "exec": "someFunc"}}`,
        exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
            sched := NewConstantVUsConfig("someKey")
            sched.VUs = null.IntFrom(10)
            sched.Duration = types.NullDurationFrom(1 * time.Minute)
            sched.GracefulStop = types.NullDurationFrom(10 * time.Second)
            sched.StartTime = types.NullDurationFrom(70 * time.Second)
            sched.Exec = null.StringFrom("someFunc")
            sched.Env = map[string]string{"test": "mest"}
            require.Equal(t, cm, lib.ScenarioConfigs{"someKey": sched})
            require.Equal(t, sched.BaseConfig.Name, cm["someKey"].GetName())
            require.Equal(t, sched.BaseConfig.Type, cm["someKey"].GetType())
            require.Equal(t, sched.BaseConfig.GetGracefulStop(), cm["someKey"].GetGracefulStop())
            require.Equal(t,
                sched.BaseConfig.StartTime.Duration,
                types.Duration(cm["someKey"].GetStartTime()),
            )
            require.Equal(t, sched.BaseConfig.Env, cm["someKey"].GetEnv())

            assert.Empty(t, cm["someKey"].Validate())
            assert.Empty(t, cm.Validate())

            et, err := lib.NewExecutionTuple(nil, nil)
            require.NoError(t, err)
            assert.Equal(t, "10 looping VUs for 1m0s (exec: someFunc, startTime: 1m10s, gracefulStop: 10s)", cm["someKey"].GetDescription(et))

            schedReqs := cm["someKey"].GetExecutionRequirements(et)
            endOffset, isFinal := lib.GetEndOffset(schedReqs)
            assert.Equal(t, 70*time.Second, endOffset)
            assert.Equal(t, true, isFinal)
            assert.Equal(t, uint64(10), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(10), lib.GetMaxPossibleVUs(schedReqs))

            // Total requirements include the 70s startTime offset.
            totalReqs := cm.GetFullExecutionRequirements(et)
            endOffset, isFinal = lib.GetEndOffset(totalReqs)
            assert.Equal(t, 140*time.Second, endOffset)
            assert.Equal(t, true, isFinal)
            assert.Equal(t, uint64(10), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(10), lib.GetMaxPossibleVUs(schedReqs))
        }},
    },
    {`{"aname": {"executor": "constant-vus", "duration": "60s"}}`, exp{}},
    {`{"": {"executor": "constant-vus", "vus": 10, "duration": "60s"}}`, exp{validationError: true}},
    {`{"aname": {"executor": "constant-vus"}}`, exp{validationError: true}},
    {`{"aname": {"executor": "constant-vus", "vus": 0.5}}`, exp{parseError: true}},
    {`{"aname": {"executor": "constant-vus", "vus": 10}}`, exp{validationError: true}},
    {`{"aname": {"executor": "constant-vus", "vus": 0, "duration": "60s"}}`, exp{validationError: true}},
    {`{"aname": {"executor": "constant-vus", "vus": -1, "duration": "60s"}}`, exp{validationError: true}},
    {`{"aname": {"executor": "constant-vus", "vus": 10, "duration": "0s"}}`, exp{validationError: true}},
    {`{"aname": {"executor": "constant-vus", "vus": 10, "duration": "10s", "startTime": "-10s"}}`, exp{validationError: true}},
    {`{"aname": {"executor": "constant-vus", "vus": 10, "duration": "10s", "exec": ""}}`, exp{validationError: true}},
    {`{"aname": {"executor": "constant-vus", "vus": 10, "duration": "10s", "gracefulStop": "-2s"}}`, exp{validationError: true}},
    // ramping-vus
    {`{"varloops": {"executor": "ramping-vus", "startVUs": 20, "gracefulStop": "15s", "gracefulRampDown": "10s",
        "startTime": "23s", "stages": [{"duration": "60s", "target": 30}, {"duration": "130s", "target": 10}]}}`,
        exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
            sched := NewRampingVUsConfig("varloops")
            sched.GracefulStop = types.NullDurationFrom(15 * time.Second)
            sched.GracefulRampDown = types.NullDurationFrom(10 * time.Second)
            sched.StartVUs = null.IntFrom(20)
            sched.StartTime = types.NullDurationFrom(23 * time.Second)
            sched.Stages = []Stage{
                {Target: null.IntFrom(30), Duration: types.NullDurationFrom(60 * time.Second)},
                {Target: null.IntFrom(10), Duration: types.NullDurationFrom(130 * time.Second)},
            }
            require.Equal(t, cm, lib.ScenarioConfigs{"varloops": sched})

            assert.Empty(t, cm["varloops"].Validate())
            assert.Empty(t, cm.Validate())

            et, err := lib.NewExecutionTuple(nil, nil)
            require.NoError(t, err)
            assert.Equal(t, "Up to 30 looping VUs for 3m10s over 2 stages (gracefulRampDown: 10s, startTime: 23s, gracefulStop: 15s)", cm["varloops"].GetDescription(et))

            schedReqs := cm["varloops"].GetExecutionRequirements(et)
            endOffset, isFinal := lib.GetEndOffset(schedReqs)
            assert.Equal(t, 205*time.Second, endOffset)
            assert.Equal(t, true, isFinal)
            assert.Equal(t, uint64(30), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(30), lib.GetMaxPossibleVUs(schedReqs))

            totalReqs := cm.GetFullExecutionRequirements(et)
            endOffset, isFinal = lib.GetEndOffset(totalReqs)
            assert.Equal(t, 228*time.Second, endOffset)
            assert.Equal(t, true, isFinal)
            assert.Equal(t, uint64(30), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(30), lib.GetMaxPossibleVUs(schedReqs))
        }},
    },
    {`{"varloops": {"executor": "ramping-vus", "startVUs": 1, "gracefulStop": "0s", "gracefulRampDown": "10s",
        "stages": [{"duration": "10s", "target": 10}]}}`,
        exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
            assert.Empty(t, cm["varloops"].Validate())
            assert.Empty(t, cm.Validate())

            et, err := lib.NewExecutionTuple(nil, nil)
            require.NoError(t, err)
            assert.Equal(t, "Up to 10 looping VUs for 10s over 1 stages (gracefulRampDown: 10s)", cm["varloops"].GetDescription(et))

            schedReqs := cm["varloops"].GetExecutionRequirements(et)
            assert.Equal(t, uint64(10), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(10), lib.GetMaxPossibleVUs(schedReqs))
        }},
    },
    {`{"varloops": {"executor": "ramping-vus", "startVUs": 1, "gracefulStop": "0s", "gracefulRampDown": "0s",
        "stages": [{"duration": "10s", "target": 10}, {"duration": "0s", "target": 1}, {"duration": "10s", "target": 5}]}}`,
        exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
            assert.Empty(t, cm["varloops"].Validate())
            assert.Empty(t, cm.Validate())

            et, err := lib.NewExecutionTuple(nil, nil)
            require.NoError(t, err)
            assert.Equal(t, "Up to 10 looping VUs for 20s over 3 stages (gracefulRampDown: 0s)", cm["varloops"].GetDescription(et))

            schedReqs := cm.GetFullExecutionRequirements(et)
            assert.Equal(t, uint64(10), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(10), lib.GetMaxPossibleVUs(schedReqs))
        }},
    },
    {`{"varloops": {"executor": "ramping-vus", "startVUs": 1, "gracefulStop": "0s", "gracefulRampDown": "0s",
        "stages": [{"duration": "10s", "target": 10}, {"duration": "0s", "target": 11},{"duration": "0s", "target": 1}, {"duration": "10s", "target": 5}]}}`,
        exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
            assert.Empty(t, cm["varloops"].Validate())
            assert.Empty(t, cm.Validate())

            et, err := lib.NewExecutionTuple(nil, nil)
            require.NoError(t, err)
            assert.Equal(t, "Up to 11 looping VUs for 20s over 4 stages (gracefulRampDown: 0s)", cm["varloops"].GetDescription(et))

            schedReqs := cm.GetFullExecutionRequirements(et)
            assert.Equal(t, uint64(11), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(11), lib.GetMaxPossibleVUs(schedReqs))
        }},
    },
    {`{"varloops": {"executor": "ramping-vus", "startVUs": 0, "stages": [{"duration": "60s", "target": 0}]}}`, exp{}},
    {`{"varloops": {"executor": "ramping-vus", "startVUs": -1, "stages": [{"duration": "60s", "target": 30}]}}`, exp{validationError: true}},
    {`{"varloops": {"executor": "ramping-vus", "startVUs": 2, "stages": [{"duration": "-60s", "target": 30}]}}`, exp{validationError: true}},
    {`{"varloops": {"executor": "ramping-vus", "startVUs": 2, "stages": [{"duration": "60s", "target": -30}]}}`, exp{validationError: true}},
    {`{"varloops": {"executor": "ramping-vus", "stages": [{"duration": "60s"}]}}`, exp{validationError: true}},
    {`{"varloops": {"executor": "ramping-vus", "stages": [{"target": 30}]}}`, exp{validationError: true}},
    {`{"varloops": {"executor": "ramping-vus", "stages": []}}`, exp{validationError: true}},
    {`{"varloops": {"executor": "ramping-vus"}}`, exp{validationError: true}},
    // shared-iterations
    {`{"ishared": {"executor": "shared-iterations", "iterations": 22, "vus": 12, "maxDuration": "100s"}}`,
        exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
            sched := NewSharedIterationsConfig("ishared")
            sched.Iterations = null.IntFrom(22)
            sched.MaxDuration = types.NullDurationFrom(100 * time.Second)
            sched.VUs = null.IntFrom(12)

            assert.Empty(t, cm["ishared"].Validate())
            assert.Empty(t, cm.Validate())

            et, err := lib.NewExecutionTuple(nil, nil)
            require.NoError(t, err)
            assert.Equal(t, "22 iterations shared among 12 VUs (maxDuration: 1m40s, gracefulStop: 30s)", cm["ishared"].GetDescription(et))

            schedReqs := cm["ishared"].GetExecutionRequirements(et)
            endOffset, isFinal := lib.GetEndOffset(schedReqs)
            assert.Equal(t, 130*time.Second, endOffset)
            assert.Equal(t, true, isFinal)
            assert.Equal(t, uint64(12), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(12), lib.GetMaxPossibleVUs(schedReqs))

            totalReqs := cm.GetFullExecutionRequirements(et)
            assert.Equal(t, schedReqs, totalReqs)

            // The same config, but scaled down by execution segments.
            et = mustNewExecutionTuple(newExecutionSegmentFromString("0:1/3"), newExecutionSegmentSequenceFromString("0,1/3,2/3,1"))
            assert.Equal(t, "8 iterations shared among 4 VUs (maxDuration: 1m40s, gracefulStop: 30s)", cm["ishared"].GetDescription(et))

            schedReqs = cm["ishared"].GetExecutionRequirements(et)
            endOffset, isFinal = lib.GetEndOffset(schedReqs)
            assert.Equal(t, 130*time.Second, endOffset)
            assert.Equal(t, true, isFinal)
            assert.Equal(t, uint64(4), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(4), lib.GetMaxPossibleVUs(schedReqs))

            totalReqs = cm.GetFullExecutionRequirements(et)
            assert.Equal(t, schedReqs, totalReqs)

            et = mustNewExecutionTuple(newExecutionSegmentFromString("1/3:2/3"), newExecutionSegmentSequenceFromString("0,1/3,2/3,1"))
            assert.Equal(t, "7 iterations shared among 4 VUs (maxDuration: 1m40s, gracefulStop: 30s)", cm["ishared"].GetDescription(et))

            schedReqs = cm["ishared"].GetExecutionRequirements(et)
            endOffset, isFinal = lib.GetEndOffset(schedReqs)
            assert.Equal(t, 130*time.Second, endOffset)
            assert.Equal(t, true, isFinal)
            assert.Equal(t, uint64(4), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(4), lib.GetMaxPossibleVUs(schedReqs))

            totalReqs = cm.GetFullExecutionRequirements(et)
            assert.Equal(t, schedReqs, totalReqs)

            // A tiny trailing segment can end up with 0 iterations and 0 VUs.
            et = mustNewExecutionTuple(newExecutionSegmentFromString("12/13:1"),
                newExecutionSegmentSequenceFromString("0,1/13,2/13,3/13,4/13,5/13,6/13,7/13,8/13,9/13,10/13,11/13,12/13,1"))
            assert.Equal(t, "0 iterations shared among 0 VUs (maxDuration: 1m40s, gracefulStop: 30s)", cm["ishared"].GetDescription(et))

            schedReqs = cm["ishared"].GetExecutionRequirements(et)
            endOffset, isFinal = lib.GetEndOffset(schedReqs)
            assert.Equal(t, time.Duration(0), endOffset)
            assert.Equal(t, true, isFinal)
            assert.Equal(t, uint64(0), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(0), lib.GetMaxPossibleVUs(schedReqs))

            totalReqs = cm.GetFullExecutionRequirements(et)
            assert.Equal(t, schedReqs, totalReqs)
        }},
    },
    {`{"ishared": {"executor": "shared-iterations"}}`, exp{}}, // Has 1 VU & 1 iter default values
    {`{"ishared": {"executor": "shared-iterations", "iterations": 20}}`, exp{}},
    {`{"ishared": {"executor": "shared-iterations", "vus": 10}}`, exp{validationError: true}}, // error because VUs are more than iters
    {`{"ishared": {"executor": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "30m"}}`, exp{}},
    {`{"ishared": {"executor": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "-3m"}}`, exp{validationError: true}},
    {`{"ishared": {"executor": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "0s"}}`, exp{validationError: true}},
    {`{"ishared": {"executor": "shared-iterations", "iterations": 20, "vus": -10}}`, exp{validationError: true}},
    {`{"ishared": {"executor": "shared-iterations", "iterations": -1, "vus": 1}}`, exp{validationError: true}},
    {`{"ishared": {"executor": "shared-iterations", "iterations": 20, "vus": 30}}`, exp{validationError: true}},
    // per-vu-iterations
    {`{"ipervu": {"executor": "per-vu-iterations", "iterations": 23, "vus": 13, "gracefulStop": 0}}`,
        exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
            sched := NewPerVUIterationsConfig("ipervu")
            sched.Iterations = null.IntFrom(23)
            sched.GracefulStop = types.NullDurationFrom(0)
            sched.VUs = null.IntFrom(13)

            assert.Empty(t, cm["ipervu"].Validate())
            assert.Empty(t, cm.Validate())

            et, err := lib.NewExecutionTuple(nil, nil)
            require.NoError(t, err)
            assert.Equal(t, "23 iterations for each of 13 VUs (maxDuration: 10m0s)", cm["ipervu"].GetDescription(et))

            schedReqs := cm["ipervu"].GetExecutionRequirements(et)
            endOffset, isFinal := lib.GetEndOffset(schedReqs)
            assert.Equal(t, 600*time.Second, endOffset)
            assert.Equal(t, true, isFinal)
            assert.Equal(t, uint64(13), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(13), lib.GetMaxPossibleVUs(schedReqs))

            totalReqs := cm.GetFullExecutionRequirements(et)
            assert.Equal(t, schedReqs, totalReqs)
        }},
    },
    {`{"ipervu": {"executor": "per-vu-iterations"}}`, exp{}}, // Has 1 VU & 1 iter default values
    {`{"ipervu": {"executor": "per-vu-iterations", "iterations": 20}}`, exp{}},
    {`{"ipervu": {"executor": "per-vu-iterations", "vus": 10}}`, exp{}},
    {`{"ipervu": {"executor": "per-vu-iterations", "iterations": 20, "vus": 10}}`, exp{}},
    {`{"ipervu": {"executor": "per-vu-iterations", "iterations": 20, "vus": 10, "maxDuration": "-3m"}}`, exp{validationError: true}},
    {`{"ipervu": {"executor": "per-vu-iterations", "iterations": 20, "vus": 10, "maxDuration": "0s"}}`, exp{validationError: true}},
    {`{"ipervu": {"executor": "per-vu-iterations", "iterations": 20, "vus": -10}}`, exp{validationError: true}},
    {`{"ipervu": {"executor": "per-vu-iterations", "iterations": -1, "vus": 1}}`, exp{validationError: true}},

    // constant-arrival-rate
    {`{"carrival": {"executor": "constant-arrival-rate", "rate": 30, "timeUnit": "1m", "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`,
        exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
            et, err := lib.NewExecutionTuple(nil, nil)
            require.NoError(t, err)
            sched := NewConstantArrivalRateConfig("carrival")
            sched.Rate = null.IntFrom(30)
            sched.Duration = types.NullDurationFrom(10 * time.Minute)
            sched.TimeUnit = types.NullDurationFrom(1 * time.Minute)
            sched.PreAllocatedVUs = null.IntFrom(20)
            sched.MaxVUs = null.IntFrom(30)

            assert.Empty(t, cm["carrival"].Validate())
            assert.Empty(t, cm.Validate())

            // 30 iters per 1m timeUnit == 0.50 iterations/s.
            assert.Equal(t, "0.50 iterations/s for 10m0s (maxVUs: 20-30, gracefulStop: 30s)", cm["carrival"].GetDescription(et))

            schedReqs := cm["carrival"].GetExecutionRequirements(et)
            endOffset, isFinal := lib.GetEndOffset(schedReqs)
            assert.Equal(t, 630*time.Second, endOffset)
            assert.Equal(t, true, isFinal)
            assert.Equal(t, uint64(20), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(30), lib.GetMaxPossibleVUs(schedReqs))

            totalReqs := cm.GetFullExecutionRequirements(et)
            assert.Equal(t, schedReqs, totalReqs)
        }},
    },
    {`{"carrival": {"executor": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, exp{}},
    {`{"carrival": {"executor": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30, "timeUnit": "-1s"}}`, exp{validationError: true}},
    {`{"carrival": {"executor": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20}}`,
        exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
            // Omitted maxVUs defaults to preAllocatedVUs.
            assert.Empty(t, cm["carrival"].Validate())
            require.EqualValues(t, 20, cm["carrival"].(*ConstantArrivalRateConfig).MaxVUs.Int64)
        }},
    },
    {`{"carrival": {"executor": "constant-arrival-rate", "rate": 10, "duration": "10m", "maxVUs": 30}}`, exp{validationError: true}},
    {`{"carrival": {"executor": "constant-arrival-rate", "rate": 10, "preAllocatedVUs": 20, "maxVUs": 30}}`, exp{validationError: true}},
    {`{"carrival": {"executor": "constant-arrival-rate", "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, exp{validationError: true}},
    {`{"carrival": {"executor": "constant-arrival-rate", "rate": 10, "duration": "0m", "preAllocatedVUs": 20, "maxVUs": 30}}`, exp{validationError: true}},
    {`{"carrival": {"executor": "constant-arrival-rate", "rate": 0, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, exp{validationError: true}},
    {`{"carrival": {"executor": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 15}}`, exp{validationError: true}},
    {`{"carrival": {"executor": "constant-arrival-rate", "rate": 10, "duration": "0s", "preAllocatedVUs": 20, "maxVUs": 25}}`, exp{validationError: true}},
    {`{"carrival": {"executor": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": -2, "maxVUs": 25}}`, exp{validationError: true}},
    // ramping-arrival-rate
    {`{"varrival": {"executor": "ramping-arrival-rate", "startRate": 10, "timeUnit": "30s", "preAllocatedVUs": 20,
        "maxVUs": 50, "stages": [{"duration": "3m", "target": 30}, {"duration": "5m", "target": 10}]}}`,
        exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
            sched := NewRampingArrivalRateConfig("varrival")
            sched.StartRate = null.IntFrom(10)
            sched.Stages = []Stage{
                {Target: null.IntFrom(30), Duration: types.NullDurationFrom(180 * time.Second)},
                {Target: null.IntFrom(10), Duration: types.NullDurationFrom(300 * time.Second)},
            }
            sched.TimeUnit = types.NullDurationFrom(30 * time.Second)
            sched.PreAllocatedVUs = null.IntFrom(20)
            sched.MaxVUs = null.IntFrom(50)
            require.Equal(t, cm, lib.ScenarioConfigs{"varrival": sched})

            assert.Empty(t, cm["varrival"].Validate())
            assert.Empty(t, cm.Validate())

            et, err := lib.NewExecutionTuple(nil, nil)
            require.NoError(t, err)
            assert.Equal(t, "Up to 1.00 iterations/s for 8m0s over 2 stages (maxVUs: 20-50, gracefulStop: 30s)", cm["varrival"].GetDescription(et))

            schedReqs := cm["varrival"].GetExecutionRequirements(et)
            endOffset, isFinal := lib.GetEndOffset(schedReqs)
            assert.Equal(t, 510*time.Second, endOffset)
            assert.Equal(t, true, isFinal)
            assert.Equal(t, uint64(20), lib.GetMaxPlannedVUs(schedReqs))
            assert.Equal(t, uint64(50), lib.GetMaxPossibleVUs(schedReqs))

            totalReqs := cm.GetFullExecutionRequirements(et)
            assert.Equal(t, schedReqs, totalReqs)
        }},
    },
    {`{"varrival": {"executor": "ramping-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, exp{}},
    {`{"varrival": {"executor": "ramping-arrival-rate", "preAllocatedVUs": -20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, exp{validationError: true}},
    {`{"varrival": {"executor": "ramping-arrival-rate", "startRate": -1, "preAllocatedVUs": 20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, exp{validationError: true}},
    {`{"varrival": {"executor": "ramping-arrival-rate", "preAllocatedVUs": 20, "stages": [{"duration": "5m", "target": 10}]}}`,
        exp{custom: func(t *testing.T, cm lib.ScenarioConfigs) {
            // Omitted maxVUs defaults to preAllocatedVUs here as well.
            assert.Empty(t, cm["varrival"].Validate())
            require.EqualValues(t, 20, cm["varrival"].(*RampingArrivalRateConfig).MaxVUs.Int64)
        }},
    },
    {`{"varrival": {"executor": "ramping-arrival-rate", "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, exp{validationError: true}},
    {`{"varrival": {"executor": "ramping-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50}}`, exp{validationError: true}},
    {`{"varrival": {"executor": "ramping-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50, "stages": []}}`, exp{validationError: true}},
    {`{"varrival": {"executor": "ramping-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}], "timeUnit": "-1s"}}`, exp{validationError: true}},
    {`{"varrival": {"executor": "ramping-arrival-rate", "preAllocatedVUs": 30, "maxVUs": 20, "stages": [{"duration": "5m", "target": 10}]}}`, exp{validationError: true}},
    //TODO: more tests of mixed executors and execution plans
}

// TestConfigMapParsingAndValidation runs every entry in configMapTestCases.
func TestConfigMapParsingAndValidation(t *testing.T) {
    t.Parallel()
    for i, tc := range configMapTestCases {
        tc := tc
        t.Run(fmt.Sprintf("TestCase#%d", i), func(t *testing.T) {
            t.Logf(tc.rawJSON)
            var result lib.ScenarioConfigs
            err := json.Unmarshal([]byte(tc.rawJSON), &result)
            if tc.expected.parseError {
                require.Error(t, err)
                return
            }
            require.NoError(t, err)
parseErrors := result.Validate() + if tc.expected.validationError { + assert.NotEmpty(t, parseErrors) + } else { + assert.Empty(t, parseErrors) + } + if tc.expected.custom != nil { + tc.expected.custom(t, result) + } + }) + } +} diff --git a/lib/executor/externally_controlled.go b/lib/executor/externally_controlled.go new file mode 100644 index 00000000000..643132ba2c4 --- /dev/null +++ b/lib/executor/externally_controlled.go @@ -0,0 +1,597 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import ( + "context" + "errors" + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/sirupsen/logrus" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" + "github.com/loadimpact/k6/ui/pb" +) + +const externallyControlledType = "externally-controlled" + +func init() { + lib.RegisterExecutorConfigType( + externallyControlledType, + func(name string, rawJSON []byte) (lib.ExecutorConfig, error) { + config := ExternallyControlledConfig{BaseConfig: NewBaseConfig(name, externallyControlledType)} + err := lib.StrictJSONUnmarshal(rawJSON, &config) + if err != nil { + return config, err + } + if !config.MaxVUs.Valid { + config.MaxVUs = config.VUs + } + return config, nil + }, + ) +} + +// ExternallyControlledConfigParams contains all of the options that actually +// determine the scheduling of VUs in the externally controlled executor. +type ExternallyControlledConfigParams struct { + VUs null.Int `json:"vus"` + Duration types.NullDuration `json:"duration"` // 0 is a valid value, meaning infinite duration + MaxVUs null.Int `json:"maxVUs"` +} + +// Validate just checks the control options in isolation. 
+func (mecc ExternallyControlledConfigParams) Validate() (errors []error) { + if mecc.VUs.Int64 < 0 { + errors = append(errors, fmt.Errorf("the number of VUs shouldn't be negative")) + } + + if mecc.MaxVUs.Int64 < mecc.VUs.Int64 { + errors = append(errors, fmt.Errorf( + "the number of active VUs (%d) must be less than or equal to the number of maxVUs (%d)", + mecc.VUs.Int64, mecc.MaxVUs.Int64, + )) + } + + if !mecc.Duration.Valid { + errors = append(errors, fmt.Errorf("the duration should be specified, for infinite duration use 0")) + } else if time.Duration(mecc.Duration.Duration) < 0 { + errors = append(errors, fmt.Errorf( + "the duration shouldn't be negative, for infinite duration use 0", + )) + } + + return errors +} + +// ExternallyControlledConfig stores the number of currently active VUs, the max +// number of VUs and the executor duration. The duration can be 0, which means +// "infinite duration", i.e. the user has to manually abort the script. +type ExternallyControlledConfig struct { + BaseConfig + ExternallyControlledConfigParams +} + +// Make sure we implement the lib.ExecutorConfig interface +var _ lib.ExecutorConfig = &ExternallyControlledConfig{} + +// GetDescription returns a human-readable description of the executor options +func (mec ExternallyControlledConfig) GetDescription(_ *lib.ExecutionTuple) string { + duration := "infinite" + if mec.Duration.Duration != 0 { + duration = mec.Duration.String() + } + return fmt.Sprintf( + "Externally controlled execution with %d VUs, %d max VUs, %s duration", + mec.VUs.Int64, mec.MaxVUs.Int64, duration, + ) +} + +// Validate makes sure all options are configured and valid +func (mec ExternallyControlledConfig) Validate() []error { + errors := append(mec.BaseConfig.Validate(), mec.ExternallyControlledConfigParams.Validate()...) 
+ if mec.GracefulStop.Valid { + errors = append(errors, fmt.Errorf( + "gracefulStop is not supported by the externally controlled executor", + )) + } + return errors +} + +// GetExecutionRequirements reserves the configured number of max VUs for the +// whole duration of the executor, so these VUs can be externally initialized in +// the beginning of the test. +// +// Importantly, if 0 (i.e. infinite) duration is configured, this executor +// doesn't emit the last step to relinquish these VUs. +// +// Also, the externally controlled executor doesn't set MaxUnplannedVUs in the +// returned steps, since their initialization and usage is directly controlled +// by the user, can be changed during the test runtime, and is effectively +// bounded only by the resources of the machine k6 is running on. +// +// This is not a problem, because the MaxUnplannedVUs are mostly meant to be +// used for calculating the maximum possible number of initialized VUs at any +// point during a test run. That's used for sizing purposes and for user qouta +// checking in the cloud execution, where the externally controlled executor +// isn't supported. +func (mec ExternallyControlledConfig) GetExecutionRequirements(et *lib.ExecutionTuple) []lib.ExecutionStep { + startVUs := lib.ExecutionStep{ + TimeOffset: 0, + PlannedVUs: uint64(et.Segment.Scale(mec.MaxVUs.Int64)), // user-configured, VUs to be pre-initialized + MaxUnplannedVUs: 0, // intentional, see function comment + } + + maxDuration := time.Duration(mec.Duration.Duration) + if maxDuration == 0 { + // Infinite duration, don't emit 0 VUs at the end since there's no planned end + return []lib.ExecutionStep{startVUs} + } + return []lib.ExecutionStep{startVUs, { + TimeOffset: maxDuration, + PlannedVUs: 0, + MaxUnplannedVUs: 0, // intentional, see function comment + }} +} + +// IsDistributable simply returns false because there's no way to reliably +// distribute the externally controlled executor. 
+func (ExternallyControlledConfig) IsDistributable() bool { + return false +} + +// NewExecutor creates a new ExternallyControlled executor +func (mec ExternallyControlledConfig) NewExecutor(es *lib.ExecutionState, logger *logrus.Entry) (lib.Executor, error) { + return &ExternallyControlled{ + BaseExecutor: NewBaseExecutor(mec, es, logger), + config: mec, + currentControlConfig: mec.ExternallyControlledConfigParams, + configLock: &sync.RWMutex{}, + newControlConfigs: make(chan updateConfigEvent), + pauseEvents: make(chan pauseEvent), + hasStarted: make(chan struct{}), + }, nil +} + +// HasWork reports whether there is any work to be done for the given execution segment. +func (mec ExternallyControlledConfig) HasWork(_ *lib.ExecutionTuple) bool { + // We can always initialize new VUs via the REST API, so return true. + return true +} + +type pauseEvent struct { + isPaused bool + err chan error +} + +type updateConfigEvent struct { + newConfig ExternallyControlledConfigParams + err chan error +} + +// ExternallyControlled is an implementation of the old k6 executor that could be +// controlled externally, via the k6 REST API. It implements both the +// lib.PausableExecutor and the lib.LiveUpdatableExecutor interfaces. +type ExternallyControlled struct { + *BaseExecutor + config ExternallyControlledConfig + currentControlConfig ExternallyControlledConfigParams + configLock *sync.RWMutex + newControlConfigs chan updateConfigEvent + pauseEvents chan pauseEvent + hasStarted chan struct{} +} + +// Make sure we implement all the interfaces +var ( + _ lib.Executor = &ExternallyControlled{} + _ lib.PausableExecutor = &ExternallyControlled{} + _ lib.LiveUpdatableExecutor = &ExternallyControlled{} +) + +// GetCurrentConfig just returns the executor's current configuration. 
+func (mex *ExternallyControlled) GetCurrentConfig() ExternallyControlledConfig { + mex.configLock.RLock() + defer mex.configLock.RUnlock() + return ExternallyControlledConfig{ + BaseConfig: mex.config.BaseConfig, + ExternallyControlledConfigParams: mex.currentControlConfig, + } +} + +// GetConfig just returns the executor's current configuration, it's basically +// an alias of GetCurrentConfig that implements the more generic interface. +func (mex *ExternallyControlled) GetConfig() lib.ExecutorConfig { + return mex.GetCurrentConfig() +} + +// GetProgress just returns the executor's progress bar instance. +func (mex ExternallyControlled) GetProgress() *pb.ProgressBar { + mex.configLock.RLock() + defer mex.configLock.RUnlock() + return mex.progress +} + +// GetLogger just returns the executor's logger instance. +func (mex ExternallyControlled) GetLogger() *logrus.Entry { + mex.configLock.RLock() + defer mex.configLock.RUnlock() + return mex.logger +} + +// Init doesn't do anything... +func (mex ExternallyControlled) Init(ctx context.Context) error { + return nil +} + +// SetPaused pauses or resumes the executor. +func (mex *ExternallyControlled) SetPaused(paused bool) error { + select { + case <-mex.hasStarted: + event := pauseEvent{isPaused: paused, err: make(chan error)} + mex.pauseEvents <- event + return <-event.err + default: + return fmt.Errorf("cannot pause the externally controlled executor before it has started") + } +} + +// UpdateConfig validates the supplied config and updates it in real time. It is +// possible to update the configuration even when k6 is paused, either in the +// beginning (i.e. when running k6 with --paused) or in the middle of the script +// execution. 
+func (mex *ExternallyControlled) UpdateConfig(ctx context.Context, newConf interface{}) error { + newConfigParams, ok := newConf.(ExternallyControlledConfigParams) + if !ok { + return errors.New("invalid config type") + } + if errs := newConfigParams.Validate(); len(errs) != 0 { + return fmt.Errorf("invalid configuration supplied: %s", lib.ConcatErrors(errs, ", ")) + } + + if newConfigParams.Duration.Valid && newConfigParams.Duration != mex.config.Duration { + return fmt.Errorf("the externally controlled executor duration cannot be changed") + } + if newConfigParams.MaxVUs.Valid && newConfigParams.MaxVUs.Int64 < mex.config.MaxVUs.Int64 { + // This limitation is because the externally controlled executor is + // still an executor that participates in the overall k6 scheduling. + // Thus, any VUs that were explicitly specified by the user in the + // config may be reused from or by other executors. + return fmt.Errorf( + "the new number of max VUs cannot be lower than the starting number of max VUs (%d)", + mex.config.MaxVUs.Int64, + ) + } + + mex.configLock.Lock() // guard against a simultaneous start of the test (which will close hasStarted) + select { + case <-mex.hasStarted: + mex.configLock.Unlock() + event := updateConfigEvent{newConfig: newConfigParams, err: make(chan error)} + mex.newControlConfigs <- event + return <-event.err + case <-ctx.Done(): + mex.configLock.Unlock() + return ctx.Err() + default: + mex.currentControlConfig = newConfigParams + mex.configLock.Unlock() + return nil + } +} + +// This is a helper function that is used in run for non-infinite durations. 
+func (mex *ExternallyControlled) stopWhenDurationIsReached(ctx context.Context, duration time.Duration, cancel func()) { + ctxDone := ctx.Done() + checkInterval := time.NewTicker(100 * time.Millisecond) + for { + select { + case <-ctxDone: + checkInterval.Stop() + return + + // TODO: something saner and more optimized that sleeps for pauses and + // doesn't depend on the global execution state? + case <-checkInterval.C: + elapsed := mex.executionState.GetCurrentTestRunDuration() - time.Duration(mex.config.StartTime.Duration) + if elapsed >= duration { + cancel() + return + } + } + } +} + +// manualVUHandle is a wrapper around the vuHandle helper, used in the +// ramping-vus executor. Here, instead of using its getVU and returnVU +// methods to retrieve and return a VU from the global buffer, we use them to +// accurately update the local and global active VU counters and to ensure that +// the pausing and reducing VUs operations wait for VUs to fully finish +// executing their current iterations before returning. +type manualVUHandle struct { + *vuHandle + initVU lib.InitializedVU + wg *sync.WaitGroup + + // This is the cancel of the local context, used to kill its goroutine when + // we reduce the number of MaxVUs, so that the Go GC can clean up the VU. 
+ cancelVU func() +} + +func (rs *externallyControlledRunState) newManualVUHandle( + initVU lib.InitializedVU, logger *logrus.Entry, +) *manualVUHandle { + wg := sync.WaitGroup{} + state := rs.executor.executionState + getVU := func() (lib.InitializedVU, error) { + wg.Add(1) + state.ModCurrentlyActiveVUsCount(+1) + atomic.AddInt64(rs.activeVUsCount, +1) + return initVU, nil + } + returnVU := func(_ lib.InitializedVU) { + state.ModCurrentlyActiveVUsCount(-1) + atomic.AddInt64(rs.activeVUsCount, -1) + wg.Done() + } + ctx, cancel := context.WithCancel(rs.ctx) + return &manualVUHandle{ + vuHandle: newStoppedVUHandle(ctx, getVU, returnVU, &rs.executor.config.BaseConfig, logger), + initVU: initVU, + wg: &wg, + cancelVU: cancel, + } +} + +// externallyControlledRunState is created and initialized by the Run() method +// of the externally controlled executor. It is used to track and modify various +// details of the execution, including handling of live config changes. +type externallyControlledRunState struct { + ctx context.Context + executor *ExternallyControlled + startMaxVUs int64 // the scaled number of initially configured MaxVUs + duration time.Duration // the total duration of the executor, could be 0 for infinite + activeVUsCount *int64 // the current number of active VUs, used only for the progress display + maxVUs *int64 // the current number of initialized VUs + vuHandles []*manualVUHandle // handles for manipulating and tracking all of the VUs + currentlyPaused bool // whether the executor is currently paused + + runIteration func(context.Context, lib.ActiveVU) bool // a helper closure function that runs a single iteration +} + +// retrieveStartMaxVUs gets and initializes the (scaled) number of MaxVUs +// from the global VU buffer. These are the VUs that the user originally +// specified in the JS config, and that the ExecutionScheduler pre-initialized +// for us. 
+func (rs *externallyControlledRunState) retrieveStartMaxVUs() error { + for i := int64(0); i < rs.startMaxVUs; i++ { // get the initial planned VUs from the common buffer + initVU, vuGetErr := rs.executor.executionState.GetPlannedVU(rs.executor.logger, false) + if vuGetErr != nil { + return vuGetErr + } + vuHandle := rs.newManualVUHandle(initVU, rs.executor.logger.WithField("vuNum", i)) + go vuHandle.runLoopsIfPossible(rs.runIteration) + rs.vuHandles[i] = vuHandle + } + return nil +} + +func (rs *externallyControlledRunState) progressFn() (float64, []string) { + // TODO: simulate spinner for the other case or cycle 0-100? + currentActiveVUs := atomic.LoadInt64(rs.activeVUsCount) + currentMaxVUs := atomic.LoadInt64(rs.maxVUs) + vusFmt := pb.GetFixedLengthIntFormat(currentMaxVUs) + progVUs := fmt.Sprintf(vusFmt+"/"+vusFmt+" VUs", currentActiveVUs, currentMaxVUs) + + right := []string{progVUs, rs.duration.String(), ""} + + // TODO: use a saner way to calculate the elapsed time, without relying on + // the global execution state... 
+ elapsed := rs.executor.executionState.GetCurrentTestRunDuration() - time.Duration( + rs.executor.config.StartTime.Duration) + if elapsed > rs.duration { + return 1, right + } + + progress := 0.0 + if rs.duration > 0 { + progress = math.Min(1, float64(elapsed)/float64(rs.duration)) + } + + spentDuration := pb.GetFixedLengthDuration(elapsed, rs.duration) + progDur := fmt.Sprintf("%s/%s", spentDuration, rs.duration) + right[1] = progDur + + return progress, right +} + +func (rs *externallyControlledRunState) handleConfigChange(oldCfg, newCfg ExternallyControlledConfigParams) error { + executionState := rs.executor.executionState + segment := executionState.Options.ExecutionSegment + oldActiveVUs := segment.Scale(oldCfg.VUs.Int64) + oldMaxVUs := segment.Scale(oldCfg.MaxVUs.Int64) + newActiveVUs := segment.Scale(newCfg.VUs.Int64) + newMaxVUs := segment.Scale(newCfg.MaxVUs.Int64) + + rs.executor.logger.WithFields(logrus.Fields{ + "oldActiveVUs": oldActiveVUs, "oldMaxVUs": oldMaxVUs, + "newActiveVUs": newActiveVUs, "newMaxVUs": newMaxVUs, + }).Debug("Updating execution configuration...") + + for i := oldMaxVUs; i < newMaxVUs; i++ { + select { // check if the user didn't try to abort k6 while we're scaling up the VUs + case <-rs.ctx.Done(): + return rs.ctx.Err() + default: // do nothing + } + initVU, vuInitErr := executionState.InitializeNewVU(rs.ctx, rs.executor.logger) + if vuInitErr != nil { + return vuInitErr + } + vuHandle := rs.newManualVUHandle(initVU, rs.executor.logger.WithField("vuNum", i)) + go vuHandle.runLoopsIfPossible(rs.runIteration) + rs.vuHandles = append(rs.vuHandles, vuHandle) + } + + if oldActiveVUs < newActiveVUs { + for i := oldActiveVUs; i < newActiveVUs; i++ { + if !rs.currentlyPaused { + if err := rs.vuHandles[i].start(); err != nil { + // TODO: maybe just log it ? 
+ return err + } + } + } + } else { + for i := newActiveVUs; i < oldActiveVUs; i++ { + rs.vuHandles[i].hardStop() + } + for i := newActiveVUs; i < oldActiveVUs; i++ { + rs.vuHandles[i].wg.Wait() + } + } + + if oldMaxVUs > newMaxVUs { + for i := newMaxVUs; i < oldMaxVUs; i++ { + rs.vuHandles[i].cancelVU() + if i < rs.startMaxVUs { + // return the initial planned VUs to the common buffer + executionState.ReturnVU(rs.vuHandles[i].initVU, false) + } else { + executionState.ModInitializedVUsCount(-1) + } + rs.vuHandles[i] = nil + } + rs.vuHandles = rs.vuHandles[:newMaxVUs] + } + + atomic.StoreInt64(rs.maxVUs, newMaxVUs) + return nil +} + +// Run constantly loops through as many iterations as possible on a variable +// dynamically controlled number of VUs either for the specified duration, or +// until the test is manually stopped. +// nolint:funlen,gocognit +func (mex *ExternallyControlled) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) { + mex.configLock.RLock() + // Safely get the current config - it's important that the close of the + // hasStarted channel is inside of the lock, so that there are no data races + // between it and the UpdateConfig() method. 
+ currentControlConfig := mex.currentControlConfig + close(mex.hasStarted) + mex.configLock.RUnlock() + + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + duration := time.Duration(currentControlConfig.Duration.Duration) + if duration > 0 { // Only keep track of duration if it's not infinite + go mex.stopWhenDurationIsReached(ctx, duration, cancel) + } + + mex.logger.WithFields( + logrus.Fields{"type": externallyControlledType, "duration": duration}, + ).Debug("Starting executor run...") + + startMaxVUs := mex.executionState.Options.ExecutionSegment.Scale(mex.config.MaxVUs.Int64) + runState := &externallyControlledRunState{ + ctx: ctx, + executor: mex, + startMaxVUs: startMaxVUs, + duration: duration, + vuHandles: make([]*manualVUHandle, startMaxVUs), + currentlyPaused: false, + activeVUsCount: new(int64), + maxVUs: new(int64), + runIteration: getIterationRunner(mex.executionState, mex.logger), + } + *runState.maxVUs = startMaxVUs + if err = runState.retrieveStartMaxVUs(); err != nil { + return err + } + + mex.progress.Modify(pb.WithProgress(runState.progressFn)) // Keep track of the progress + go trackProgress(parentCtx, ctx, ctx, mex, runState.progressFn) + + err = runState.handleConfigChange( // Start by setting MaxVUs to the starting MaxVUs + ExternallyControlledConfigParams{MaxVUs: mex.config.MaxVUs}, currentControlConfig, + ) + if err != nil { + return err + } + defer func() { // Make sure we release the VUs at the end + err = runState.handleConfigChange(currentControlConfig, ExternallyControlledConfigParams{}) + }() + + for { + select { + case <-ctx.Done(): + return nil + case updateConfigEvent := <-mex.newControlConfigs: + err := runState.handleConfigChange(currentControlConfig, updateConfigEvent.newConfig) + if err != nil { + updateConfigEvent.err <- err + if ctx.Err() == err { + return nil // we've already returned an error to the API client, but k6 should stop normally + } + return err + } + currentControlConfig = 
updateConfigEvent.newConfig + mex.configLock.Lock() + mex.currentControlConfig = updateConfigEvent.newConfig + mex.configLock.Unlock() + updateConfigEvent.err <- nil + + case pauseEvent := <-mex.pauseEvents: + if pauseEvent.isPaused == runState.currentlyPaused { + pauseEvent.err <- nil + continue + } + activeVUs := currentControlConfig.VUs.Int64 + if pauseEvent.isPaused { + for i := int64(0); i < activeVUs; i++ { + runState.vuHandles[i].gracefulStop() + } + for i := int64(0); i < activeVUs; i++ { + runState.vuHandles[i].wg.Wait() + } + } else { + for i := int64(0); i < activeVUs; i++ { + if err := runState.vuHandles[i].start(); err != nil { + // TODO again ... just log it? + pauseEvent.err <- err + return err + } + } + } + runState.currentlyPaused = pauseEvent.isPaused + pauseEvent.err <- nil + } + } +} diff --git a/lib/executor/externally_controlled_test.go b/lib/executor/externally_controlled_test.go new file mode 100644 index 00000000000..ed39557f701 --- /dev/null +++ b/lib/executor/externally_controlled_test.go @@ -0,0 +1,134 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/types" +) + +func getTestExternallyControlledConfig() ExternallyControlledConfig { + return ExternallyControlledConfig{ + ExternallyControlledConfigParams: ExternallyControlledConfigParams{ + VUs: null.IntFrom(2), + MaxVUs: null.IntFrom(10), + Duration: types.NullDurationFrom(2 * time.Second), + }, + } +} + +func TestExternallyControlledRun(t *testing.T) { + t.Parallel() + + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + + doneIters := new(uint64) + var ctx, cancel, executor, _ = setupExecutor( + t, getTestExternallyControlledConfig(), es, + simpleRunner(func(ctx context.Context) error { + time.Sleep(200 * time.Millisecond) + atomic.AddUint64(doneIters, 1) + return nil + }), + ) + defer cancel() + + var ( + wg sync.WaitGroup + errCh = make(chan error, 1) + doneCh = make(chan struct{}) + ) + wg.Add(1) + go func() { + defer wg.Done() + es.MarkStarted() + errCh <- executor.Run(ctx, nil) + es.MarkEnded() + close(doneCh) + }() + + updateConfig := func(vus, maxVUs int64, errMsg string) { + newConfig := ExternallyControlledConfigParams{ + VUs: null.IntFrom(vus), + MaxVUs: null.IntFrom(maxVUs), + Duration: types.NullDurationFrom(2 * time.Second), + } + err := executor.(*ExternallyControlled).UpdateConfig(ctx, newConfig) + if errMsg != "" { + assert.EqualError(t, err, errMsg) + } else { + assert.NoError(t, err) + } + } + + var resultVUCount [][]int64 + snapshot := func() { + resultVUCount = append(resultVUCount, + []int64{es.GetCurrentlyActiveVUsCount(), es.GetInitializedVUsCount()}) + } + + wg.Add(1) + go func() { + defer wg.Done() + snapshotTicker := time.NewTicker(500 * time.Millisecond) + ticks := 0 + for { + select { + 
case <-snapshotTicker.C: + snapshot() + switch ticks { + case 0, 2: + updateConfig(4, 10, "") + case 1: + updateConfig(8, 20, "") + case 3: + updateConfig(15, 10, + "invalid configuration supplied: the number of active VUs (15)"+ + " must be less than or equal to the number of maxVUs (10)") + updateConfig(-1, 10, + "invalid configuration supplied: the number of VUs shouldn't be negative") + } + ticks++ + case <-doneCh: + snapshotTicker.Stop() + snapshot() + return + } + } + }() + + wg.Wait() + require.NoError(t, <-errCh) + assert.Equal(t, uint64(48), atomic.LoadUint64(doneIters)) + assert.Equal(t, [][]int64{{2, 10}, {4, 10}, {8, 20}, {4, 10}, {0, 10}}, resultVUCount) +} diff --git a/lib/executor/helpers.go b/lib/executor/helpers.go new file mode 100644 index 00000000000..46fdf3c432d --- /dev/null +++ b/lib/executor/helpers.go @@ -0,0 +1,232 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import ( + "context" + "fmt" + "math/big" + "time" + + "github.com/sirupsen/logrus" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/ui/pb" +) + +func sumStagesDuration(stages []Stage) (result time.Duration) { + for _, s := range stages { + result += time.Duration(s.Duration.Duration) + } + return +} + +func getStagesUnscaledMaxTarget(unscaledStartValue int64, stages []Stage) int64 { + max := unscaledStartValue + for _, s := range stages { + if s.Target.Int64 > max { + max = s.Target.Int64 + } + } + return max +} + +// A helper function to avoid code duplication +func validateStages(stages []Stage) []error { + var errors []error + if len(stages) == 0 { + errors = append(errors, fmt.Errorf("at least one stage has to be specified")) + return errors + } + + for i, s := range stages { + stageNum := i + 1 + if !s.Duration.Valid { + errors = append(errors, fmt.Errorf("stage %d doesn't have a duration", stageNum)) + } else if s.Duration.Duration < 0 { + errors = append(errors, fmt.Errorf("the duration for stage %d shouldn't be negative", stageNum)) + } + if !s.Target.Valid { + errors = append(errors, fmt.Errorf("stage %d doesn't have a target", stageNum)) + } else if s.Target.Int64 < 0 { + errors = append(errors, fmt.Errorf("the target for stage %d shouldn't be negative", stageNum)) + } + } + return errors +} + +// getIterationRunner is a helper function that returns an iteration executor +// closure. It takes care of updating the execution state statistics and +// warning messages. 
And returns whether a full iteration was finished or not +// +// TODO: emit the end-of-test iteration metrics here (https://github.com/loadimpact/k6/issues/1250) +func getIterationRunner( + executionState *lib.ExecutionState, logger *logrus.Entry, +) func(context.Context, lib.ActiveVU) bool { + return func(ctx context.Context, vu lib.ActiveVU) bool { + err := vu.RunOnce() + + // TODO: track (non-ramp-down) errors from script iterations as a metric, + // and have a default threshold that will abort the script when the error + // rate exceeds a certain percentage + + select { + case <-ctx.Done(): + // Don't log errors or emit iterations metrics from cancelled iterations + executionState.AddInterruptedIterations(1) + return false + default: + if err != nil { + if s, ok := err.(fmt.Stringer); ok { + logger.Error(s.String()) + } else { + logger.Error(err.Error()) + } + // TODO: investigate context cancelled errors + } + + // TODO: move emission of end-of-iteration metrics here? + executionState.AddFullIterations(1) + return true + } + } +} + +// getDurationContexts is used to create sub-contexts that can restrict an +// executor to only run for its allotted time. +// +// If the executor doesn't have a graceful stop period for iterations, then +// both returned sub-contexts will be the same one, with a timeout equal to +// the supplied regular executor duration. +// +// But if a graceful stop is enabled, then the first returned context (and the +// cancel func) will be for the "outer" sub-context. Its timeout will include +// both the regular duration and the specified graceful stop period. The second +// context will be a sub-context of the first one and its timeout will include +// only the regular duration. +// +// In either case, the usage of these contexts should be like this: +// - As long as the regDurationCtx isn't done, new iterations can be started. 
+// - After regDurationCtx is done, no new iterations should be started; every +// VU that finishes an iteration from now on can be returned to the buffer +// pool in the ExecutionState struct. +// - After maxDurationCtx is done, any VUs with iterations will be +// interrupted by the context's closing and will be returned to the buffer. +// - If you want to interrupt the execution of all VUs prematurely (e.g. there +// was an error or something like that), trigger maxDurationCancel(). +// - If the whole test is aborted, the parent context will be cancelled, so +// that will also cancel these contexts, thus the "general abort" case is +// handled transparently. +func getDurationContexts(parentCtx context.Context, regularDuration, gracefulStop time.Duration) ( + startTime time.Time, maxDurationCtx, regDurationCtx context.Context, maxDurationCancel func(), +) { + startTime = time.Now() + maxEndTime := startTime.Add(regularDuration + gracefulStop) + + maxDurationCtx, maxDurationCancel = context.WithDeadline(parentCtx, maxEndTime) + if gracefulStop == 0 { + return startTime, maxDurationCtx, maxDurationCtx, maxDurationCancel + } + regDurationCtx, _ = context.WithDeadline(maxDurationCtx, startTime.Add(regularDuration)) //nolint:govet + return startTime, maxDurationCtx, regDurationCtx, maxDurationCancel +} + +// trackProgress is a helper function that monitors certain end-events in an +// executor and updates its progressbar accordingly. 
+func trackProgress( + parentCtx, maxDurationCtx, regDurationCtx context.Context, + exec lib.Executor, snapshot func() (float64, []string), +) { + progressBar := exec.GetProgress() + logger := exec.GetLogger() + + <-regDurationCtx.Done() // Wait for the regular context to be over + gracefulStop := exec.GetConfig().GetGracefulStop() + if parentCtx.Err() == nil && gracefulStop > 0 { + p, right := snapshot() + logger.WithField("gracefulStop", gracefulStop).Debug( + "Regular duration is done, waiting for iterations to gracefully finish", + ) + progressBar.Modify( + pb.WithStatus(pb.Stopping), + pb.WithConstProgress(p, right...), + ) + } + + <-maxDurationCtx.Done() + p, right := snapshot() + constProg := pb.WithConstProgress(p, right...) + select { + case <-parentCtx.Done(): + progressBar.Modify(pb.WithStatus(pb.Interrupted), constProg) + default: + status := pb.WithStatus(pb.Done) + if p < 1 { + status = pb.WithStatus(pb.Interrupted) + } + progressBar.Modify(status, constProg) + } +} + +// getScaledArrivalRate returns a rational number containing the scaled value of +// the given rate over the given period. This should generally be the first +// function that's called, before we do any calculations with the users-supplied +// rates in the arrival-rate executors. +func getScaledArrivalRate(es *lib.ExecutionSegment, rate int64, period time.Duration) *big.Rat { + return es.InPlaceScaleRat(big.NewRat(rate, int64(period))) +} + +// getTickerPeriod is just a helper function that returns the ticker interval +// we need for given arrival-rate parameters. +// +// It's possible for this function to return a zero duration (i.e. valid=false) +// and 0 isn't a valid ticker period. This happens so we don't divide by 0 when +// the arrival-rate period is 0. This case has to be handled separately. 
+func getTickerPeriod(scaledArrivalRate *big.Rat) types.NullDuration { + if scaledArrivalRate.Sign() == 0 { + return types.NewNullDuration(0, false) + } + // Basically, the ticker rate is time.Duration(1/arrivalRate). Considering + // that time.Duration is represented as int64 nanoseconds, no meaningful + // precision is likely to be lost here... + result, _ := new(big.Rat).Inv(scaledArrivalRate).Float64() + return types.NewNullDuration(time.Duration(result), true) +} + +// getArrivalRatePerSec returns the iterations per second rate. +func getArrivalRatePerSec(scaledArrivalRate *big.Rat) *big.Rat { + perSecRate := big.NewRat(int64(time.Second), 1) + return perSecRate.Mul(perSecRate, scaledArrivalRate) +} + +func getVUActivationParams( + ctx context.Context, conf BaseConfig, deactivateCallback func(lib.InitializedVU), +) *lib.VUActivationParams { + return &lib.VUActivationParams{ + RunContext: ctx, + Scenario: conf.Name, + Exec: conf.GetExec(), + Env: conf.GetEnv(), + Tags: conf.GetTags(), + DeactivateCallback: deactivateCallback, + } +} diff --git a/lib/executor/helpers_test.go b/lib/executor/helpers_test.go new file mode 100644 index 00000000000..81dd825b8d3 --- /dev/null +++ b/lib/executor/helpers_test.go @@ -0,0 +1,35 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2020 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import "github.com/loadimpact/k6/stats" + +func sumMetricValues(samples chan stats.SampleContainer, metricName string) (sum float64) { + for _, sc := range stats.GetBufferedSamples(samples) { + samples := sc.GetSamples() + for _, s := range samples { + if s.Metric.Name == metricName { + sum += s.Value + } + } + } + return sum +} diff --git a/lib/executor/per_vu_iterations.go b/lib/executor/per_vu_iterations.go new file mode 100644 index 00000000000..302ded8799d --- /dev/null +++ b/lib/executor/per_vu_iterations.go @@ -0,0 +1,243 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/sirupsen/logrus" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/metrics" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" + "github.com/loadimpact/k6/ui/pb" +) + +const perVUIterationsType = "per-vu-iterations" + +func init() { + lib.RegisterExecutorConfigType(perVUIterationsType, func(name string, rawJSON []byte) (lib.ExecutorConfig, error) { + config := NewPerVUIterationsConfig(name) + err := lib.StrictJSONUnmarshal(rawJSON, &config) + return config, err + }) +} + +// PerVUIterationsConfig stores the number of VUs iterations, as well as maxDuration settings +type PerVUIterationsConfig struct { + BaseConfig + VUs null.Int `json:"vus"` + Iterations null.Int `json:"iterations"` + MaxDuration types.NullDuration `json:"maxDuration"` +} + +// NewPerVUIterationsConfig returns a PerVUIterationsConfig with default values +func NewPerVUIterationsConfig(name string) PerVUIterationsConfig { + return PerVUIterationsConfig{ + BaseConfig: NewBaseConfig(name, perVUIterationsType), + VUs: null.NewInt(1, false), + Iterations: null.NewInt(1, false), + MaxDuration: types.NewNullDuration(10*time.Minute, false), // TODO: shorten? + } +} + +// Make sure we implement the lib.ExecutorConfig interface +var _ lib.ExecutorConfig = &PerVUIterationsConfig{} + +// GetVUs returns the scaled VUs for the executor. +func (pvic PerVUIterationsConfig) GetVUs(et *lib.ExecutionTuple) int64 { + return et.Segment.Scale(pvic.VUs.Int64) +} + +// GetIterations returns the UNSCALED iteration count for the executor. It's +// important to note that scaling per-VU iteration executor affects only the +// number of VUs. If we also scaled the iterations, scaling would have quadratic +// effects instead of just linear. 
+func (pvic PerVUIterationsConfig) GetIterations() int64 { + return pvic.Iterations.Int64 +} + +// GetDescription returns a human-readable description of the executor options +func (pvic PerVUIterationsConfig) GetDescription(et *lib.ExecutionTuple) string { + return fmt.Sprintf("%d iterations for each of %d VUs%s", + pvic.GetIterations(), pvic.GetVUs(et), + pvic.getBaseInfo(fmt.Sprintf("maxDuration: %s", pvic.MaxDuration.Duration))) +} + +// Validate makes sure all options are configured and valid +func (pvic PerVUIterationsConfig) Validate() []error { + errors := pvic.BaseConfig.Validate() + if pvic.VUs.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) + } + + if pvic.Iterations.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the number of iterations should be more than 0")) + } + + if time.Duration(pvic.MaxDuration.Duration) < minDuration { + errors = append(errors, fmt.Errorf( + "the maxDuration should be at least %s, but is %s", minDuration, pvic.MaxDuration, + )) + } + + return errors +} + +// GetExecutionRequirements returns the number of required VUs to run the +// executor for its whole duration (disregarding any startTime), including the +// maximum waiting time for any iterations to gracefully stop. This is used by +// the execution scheduler in its VU reservation calculations, so it knows how +// many VUs to pre-initialize. 
+func (pvic PerVUIterationsConfig) GetExecutionRequirements(et *lib.ExecutionTuple) []lib.ExecutionStep { + return []lib.ExecutionStep{ + { + TimeOffset: 0, + PlannedVUs: uint64(pvic.GetVUs(et)), + }, + { + TimeOffset: time.Duration(pvic.MaxDuration.Duration + pvic.GracefulStop.Duration), + PlannedVUs: 0, + }, + } +} + +// NewExecutor creates a new PerVUIterations executor +func (pvic PerVUIterationsConfig) NewExecutor( + es *lib.ExecutionState, logger *logrus.Entry, +) (lib.Executor, error) { + return PerVUIterations{ + BaseExecutor: NewBaseExecutor(pvic, es, logger), + config: pvic, + }, nil +} + +// HasWork reports whether there is any work to be done for the given execution segment. +func (pvic PerVUIterationsConfig) HasWork(et *lib.ExecutionTuple) bool { + return pvic.GetVUs(et) > 0 && pvic.GetIterations() > 0 +} + +// PerVUIterations executes a specific number of iterations with each VU. +type PerVUIterations struct { + *BaseExecutor + config PerVUIterationsConfig +} + +// Make sure we implement the lib.Executor interface. +var _ lib.Executor = &PerVUIterations{} + +// Run executes a specific number of iterations with each configured VU. 
+// nolint:funlen +func (pvi PerVUIterations) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) { + numVUs := pvi.config.GetVUs(pvi.executionState.ExecutionTuple) + iterations := pvi.config.GetIterations() + duration := time.Duration(pvi.config.MaxDuration.Duration) + gracefulStop := pvi.config.GetGracefulStop() + + startTime, maxDurationCtx, regDurationCtx, cancel := getDurationContexts(parentCtx, duration, gracefulStop) + defer cancel() + + // Make sure the log and the progress bar have accurate information + pvi.logger.WithFields(logrus.Fields{ + "vus": numVUs, "iterations": iterations, "maxDuration": duration, "type": pvi.config.GetType(), + }).Debug("Starting executor run...") + + totalIters := uint64(numVUs * iterations) + doneIters := new(uint64) + + vusFmt := pb.GetFixedLengthIntFormat(numVUs) + itersFmt := pb.GetFixedLengthIntFormat(int64(totalIters)) + progressFn := func() (float64, []string) { + spent := time.Since(startTime) + progVUs := fmt.Sprintf(vusFmt+" VUs", numVUs) + currentDoneIters := atomic.LoadUint64(doneIters) + progIters := fmt.Sprintf(itersFmt+"/"+itersFmt+" iters, %d per VU", + currentDoneIters, totalIters, iterations) + right := []string{progVUs, duration.String(), progIters} + if spent > duration { + return 1, right + } + + spentDuration := pb.GetFixedLengthDuration(spent, duration) + progDur := fmt.Sprintf("%s/%s", spentDuration, duration) + right[1] = progDur + + return float64(currentDoneIters) / float64(totalIters), right + } + pvi.progress.Modify(pb.WithProgress(progressFn)) + go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, pvi, progressFn) + + // Actually schedule the VUs and iterations... 
+ activeVUs := &sync.WaitGroup{} + defer activeVUs.Wait() + + regDurationDone := regDurationCtx.Done() + runIteration := getIterationRunner(pvi.executionState, pvi.logger) + + activationParams := getVUActivationParams(maxDurationCtx, pvi.config.BaseConfig, + func(u lib.InitializedVU) { + pvi.executionState.ReturnVU(u, true) + activeVUs.Done() + }) + handleVU := func(initVU lib.InitializedVU) { + ctx, cancel := context.WithCancel(maxDurationCtx) + defer cancel() + + newParams := *activationParams + newParams.RunContext = ctx + + vuID := initVU.GetID() + activeVU := initVU.Activate(&newParams) + + for i := int64(0); i < iterations; i++ { + select { + case <-regDurationDone: + stats.PushIfNotDone(parentCtx, out, stats.Sample{ + Value: float64(iterations - i), Metric: metrics.DroppedIterations, + Tags: pvi.getMetricTags(&vuID), Time: time.Now(), + }) + return // don't make more iterations + default: + // continue looping + } + runIteration(maxDurationCtx, activeVU) + atomic.AddUint64(doneIters, 1) + } + } + + for i := int64(0); i < numVUs; i++ { + initializedVU, err := pvi.executionState.GetPlannedVU(pvi.logger, true) + if err != nil { + cancel() + return err + } + activeVUs.Add(1) + go handleVU(initializedVU) + } + + return nil +} diff --git a/lib/executor/per_vu_iterations_test.go b/lib/executor/per_vu_iterations_test.go new file mode 100644 index 00000000000..4eb57303390 --- /dev/null +++ b/lib/executor/per_vu_iterations_test.go @@ -0,0 +1,158 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package executor + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/metrics" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" +) + +func getTestPerVUIterationsConfig() PerVUIterationsConfig { + return PerVUIterationsConfig{ + BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(1 * time.Second)}, + VUs: null.IntFrom(10), + Iterations: null.IntFrom(100), + MaxDuration: types.NullDurationFrom(3 * time.Second), + } +} + +// Baseline test +func TestPerVUIterationsRun(t *testing.T) { + t.Parallel() + var result sync.Map + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + var ctx, cancel, executor, _ = setupExecutor( + t, getTestPerVUIterationsConfig(), es, + simpleRunner(func(ctx context.Context) error { + state := lib.GetState(ctx) + currIter, _ := result.LoadOrStore(state.Vu, uint64(0)) + result.Store(state.Vu, currIter.(uint64)+1) + return nil + }), + ) + defer cancel() + err = executor.Run(ctx, nil) + require.NoError(t, err) + + var totalIters uint64 + result.Range(func(key, value interface{}) bool { + vuIters := value.(uint64) + assert.Equal(t, uint64(100), vuIters) + totalIters += vuIters + return true + }) + assert.Equal(t, uint64(1000), totalIters) +} + +// Test that when one VU "slows down", others will *not* pick up the workload. 
+// This is the reverse behavior of the SharedIterations executor. +func TestPerVUIterationsRunVariableVU(t *testing.T) { + t.Parallel() + var ( + result sync.Map + slowVUID int64 + ) + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + var ctx, cancel, executor, _ = setupExecutor( + t, getTestPerVUIterationsConfig(), es, + simpleRunner(func(ctx context.Context) error { + state := lib.GetState(ctx) + // Pick one VU randomly and always slow it down. + sid := atomic.LoadInt64(&slowVUID) + if sid == int64(0) { + atomic.StoreInt64(&slowVUID, state.Vu) + } + if sid == state.Vu { + time.Sleep(200 * time.Millisecond) + } + currIter, _ := result.LoadOrStore(state.Vu, uint64(0)) + result.Store(state.Vu, currIter.(uint64)+1) + return nil + }), + ) + defer cancel() + err = executor.Run(ctx, nil) + require.NoError(t, err) + + val, ok := result.Load(slowVUID) + assert.True(t, ok) + + var totalIters uint64 + result.Range(func(key, value interface{}) bool { + vuIters := value.(uint64) + if key != slowVUID { + assert.Equal(t, uint64(100), vuIters) + } + totalIters += vuIters + return true + }) + + // The slow VU should complete 16 iterations given these timings, + // while the rest should equally complete their assigned 100 iterations. 
+ assert.Equal(t, uint64(16), val) + assert.Equal(t, uint64(916), totalIters) +} + +func TestPerVuIterationsEmitDroppedIterations(t *testing.T) { + t.Parallel() + var count int64 + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + + config := PerVUIterationsConfig{ + VUs: null.IntFrom(5), + Iterations: null.IntFrom(20), + MaxDuration: types.NullDurationFrom(1 * time.Second), + } + + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, logHook := setupExecutor( + t, config, es, + simpleRunner(func(ctx context.Context) error { + atomic.AddInt64(&count, 1) + <-ctx.Done() + return nil + }), + ) + defer cancel() + engineOut := make(chan stats.SampleContainer, 1000) + err = executor.Run(ctx, engineOut) + require.NoError(t, err) + assert.Empty(t, logHook.Drain()) + assert.Equal(t, int64(5), count) + assert.Equal(t, float64(95), sumMetricValues(engineOut, metrics.DroppedIterations.Name)) +} diff --git a/lib/executor/ramping_arrival_rate.go b/lib/executor/ramping_arrival_rate.go new file mode 100644 index 00000000000..d996dc51161 --- /dev/null +++ b/lib/executor/ramping_arrival_rate.go @@ -0,0 +1,474 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import ( + "context" + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/sirupsen/logrus" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/metrics" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" + "github.com/loadimpact/k6/ui/pb" +) + +const rampingArrivalRateType = "ramping-arrival-rate" + +func init() { + lib.RegisterExecutorConfigType( + rampingArrivalRateType, + func(name string, rawJSON []byte) (lib.ExecutorConfig, error) { + config := NewRampingArrivalRateConfig(name) + err := lib.StrictJSONUnmarshal(rawJSON, &config) + return config, err + }, + ) +} + +// RampingArrivalRateConfig stores config for the ramping (i.e. variable) +// arrival-rate executor. +type RampingArrivalRateConfig struct { + BaseConfig + StartRate null.Int `json:"startRate"` + TimeUnit types.NullDuration `json:"timeUnit"` + Stages []Stage `json:"stages"` + + // Initialize `PreAllocatedVUs` number of VUs, and if more than that are needed, + // they will be dynamically allocated, until `MaxVUs` is reached, which is an + // absolutely hard limit on the number of VUs the executor will use + PreAllocatedVUs null.Int `json:"preAllocatedVUs"` + MaxVUs null.Int `json:"maxVUs"` +} + +// NewRampingArrivalRateConfig returns a RampingArrivalRateConfig with default values +func NewRampingArrivalRateConfig(name string) *RampingArrivalRateConfig { + return &RampingArrivalRateConfig{ + BaseConfig: NewBaseConfig(name, rampingArrivalRateType), + TimeUnit: types.NewNullDuration(1*time.Second, false), + } +} + +// Make sure we implement the lib.ExecutorConfig interface +var _ lib.ExecutorConfig = &RampingArrivalRateConfig{} + +// GetPreAllocatedVUs is just a helper method that returns the scaled pre-allocated VUs. 
+func (varc RampingArrivalRateConfig) GetPreAllocatedVUs(et *lib.ExecutionTuple) int64 { + return et.Segment.Scale(varc.PreAllocatedVUs.Int64) +} + +// GetMaxVUs is just a helper method that returns the scaled max VUs. +func (varc RampingArrivalRateConfig) GetMaxVUs(et *lib.ExecutionTuple) int64 { + return et.Segment.Scale(varc.MaxVUs.Int64) +} + +// GetDescription returns a human-readable description of the executor options +func (varc RampingArrivalRateConfig) GetDescription(et *lib.ExecutionTuple) string { + // TODO: something better? always show iterations per second? + maxVUsRange := fmt.Sprintf("maxVUs: %d", et.Segment.Scale(varc.PreAllocatedVUs.Int64)) + if varc.MaxVUs.Int64 > varc.PreAllocatedVUs.Int64 { + maxVUsRange += fmt.Sprintf("-%d", et.Segment.Scale(varc.MaxVUs.Int64)) + } + maxUnscaledRate := getStagesUnscaledMaxTarget(varc.StartRate.Int64, varc.Stages) + maxArrRatePerSec, _ := getArrivalRatePerSec( + getScaledArrivalRate(et.Segment, maxUnscaledRate, time.Duration(varc.TimeUnit.Duration)), + ).Float64() + + return fmt.Sprintf("Up to %.2f iterations/s for %s over %d stages%s", + maxArrRatePerSec, sumStagesDuration(varc.Stages), + len(varc.Stages), varc.getBaseInfo(maxVUsRange)) +} + +// Validate makes sure all options are configured and valid +func (varc *RampingArrivalRateConfig) Validate() []error { + errors := varc.BaseConfig.Validate() + + if varc.StartRate.Int64 < 0 { + errors = append(errors, fmt.Errorf("the startRate value shouldn't be negative")) + } + + if time.Duration(varc.TimeUnit.Duration) < 0 { + errors = append(errors, fmt.Errorf("the timeUnit should be more than 0")) + } + + errors = append(errors, validateStages(varc.Stages)...) 
+ + if !varc.PreAllocatedVUs.Valid { + errors = append(errors, fmt.Errorf("the number of preAllocatedVUs isn't specified")) + } else if varc.PreAllocatedVUs.Int64 < 0 { + errors = append(errors, fmt.Errorf("the number of preAllocatedVUs shouldn't be negative")) + } + + if !varc.MaxVUs.Valid { + // TODO: don't change the config while validating + varc.MaxVUs.Int64 = varc.PreAllocatedVUs.Int64 + } else if varc.MaxVUs.Int64 < varc.PreAllocatedVUs.Int64 { + errors = append(errors, fmt.Errorf("maxVUs shouldn't be less than preAllocatedVUs")) + } + + return errors +} + +// GetExecutionRequirements returns the number of required VUs to run the +// executor for its whole duration (disregarding any startTime), including the +// maximum waiting time for any iterations to gracefully stop. This is used by +// the execution scheduler in its VU reservation calculations, so it knows how +// many VUs to pre-initialize. +func (varc RampingArrivalRateConfig) GetExecutionRequirements(et *lib.ExecutionTuple) []lib.ExecutionStep { + return []lib.ExecutionStep{ + { + TimeOffset: 0, + PlannedVUs: uint64(et.Segment.Scale(varc.PreAllocatedVUs.Int64)), + MaxUnplannedVUs: uint64(et.Segment.Scale(varc.MaxVUs.Int64 - varc.PreAllocatedVUs.Int64)), + }, + { + TimeOffset: sumStagesDuration(varc.Stages) + time.Duration(varc.GracefulStop.Duration), + PlannedVUs: 0, + MaxUnplannedVUs: 0, + }, + } +} + +// NewExecutor creates a new RampingArrivalRate executor +func (varc RampingArrivalRateConfig) NewExecutor( + es *lib.ExecutionState, logger *logrus.Entry, +) (lib.Executor, error) { + return RampingArrivalRate{ + BaseExecutor: NewBaseExecutor(&varc, es, logger), + config: varc, + }, nil +} + +// HasWork reports whether there is any work to be done for the given execution segment. +func (varc RampingArrivalRateConfig) HasWork(et *lib.ExecutionTuple) bool { + return varc.GetMaxVUs(et) > 0 +} + +// RampingArrivalRate tries to execute a specific number of iterations for a +// specific period. 
+// TODO: combine with the ConstantArrivalRate? +type RampingArrivalRate struct { + *BaseExecutor + config RampingArrivalRateConfig +} + +// Make sure we implement the lib.Executor interface. +var _ lib.Executor = &RampingArrivalRate{} + +// cal calculates the transtitions between stages and gives the next full value produced by the +// stages. In this explanation we are talking about events and in practice those events are starting +// of an iteration, but could really be anything that needs to occur at a constant or linear rate. +// +// The basic idea is that we make a graph with the X axis being time and the Y axis being +// events/s we know that the area of the figure between the graph and the X axis is equal to the +// amount of events done - we multiply time by events per time so we get events ... +// Mathematics :). +// +// Lets look at a simple example - lets say we start with 2 events and the first stage is 5 +// seconds to 2 events/s and then we have a second stage for 5 second that goes up to 3 events +// (using small numbers because ... well it is easier :D). This will look something like: +// ^ +// 7| +// 6| +// 5| +// 4| +// 3| ,-+ +// 2|----+-' | +// 1| | | +// +----+----+----------------------------------> +// 0s 5s 10s +// TODO: bigger and more stages +// +// Now the question is when(where on the graph) does the first event happen? Well in this simple +// case it is easy it will be at 0.5 seconds as we are doing 2 events/s. If we want to know when +// event n will happen we need to calculate n = 2 * x, where x is the time it will happen, so we +// need to calculate x = n/2as we are interested in the time, x. +// So if we just had a constant function for each event n we can calculate n/2 and find out when +// it needs to start. +// As we can see though the graph changes as stages change. 
But we can calculate how many events
+// each stage will have, again it is the area from the start of the stage to its end and between
+// the graph and the X axis. So in this case we know that the first stage will have 10 full events
+// in it and no more or less. So if we are trying to find out when the 12th event will happen, the answer
+// will be after the 5th second.
+//
+// The graph doesn't show this well but we are ramping up linearly (we could possibly add
+// other ramping up/down functions later). So at 7.5 seconds for example we should be doing 2.5
+// events/s. You could start slicing the graph constantly and in this way represent the ramping
+// up/down as multiple constant functions, and you will get mostly okayish results. But here is
+// where calculus comes into play. Calculus gives us a way to exactly calculate the area for any
+// given function and linear ramp up/downs just happen to be pretty easy (actual math proof in
+// https://github.com/loadimpact/k6/issues/1299#issuecomment-575661084).
+//
+// One tricky last point is what happens if a stage only completes 9.8 events? Let's say that the
+// first stage above was 4.9 seconds long - 2 * 4.9 is 9.8, so we have 9 events and .8 of an event; what
+// do we do with that? Well, the 10th event will happen in the next stage (if any) and will happen
+// when the area from the start till time x is 0.2 (instead of 1), as 9.8 + 0.2 is 10. So the 12th for
+// example will be when the area is 2.2, as 9.8+2.2 is 12. So we just carry this around.
+//
+// So in the end what cal is doing is to get formulas which will tell it when
+// a given event n in order will happen. It helps itself by knowing that a given
+// stage will have some given amount (the area of the stage) of events, and if we are past that one we
+// know we are not in that stage.
+// +// The specific implementation here can only go forward and does incorporate +// the striping algorithm from the lib.ExecutionTuple for additional speed up but this could +// possibly be refactored if need for this arises. +func (varc RampingArrivalRateConfig) cal(et *lib.ExecutionTuple, ch chan<- time.Duration) { + start, offsets, _ := et.GetStripedOffsets() + li := -1 + // TODO: move this to a utility function, or directly what GetStripedOffsets uses once we see everywhere we will use it + next := func() int64 { + li++ + return offsets[li%len(offsets)] + } + defer close(ch) // TODO: maybe this is not a good design - closing a channel we get + var ( + stageStart time.Duration + timeUnit = float64(varc.TimeUnit.Duration) + doneSoFar, endCount, to, dur float64 + from = float64(varc.StartRate.ValueOrZero()) / timeUnit + // start .. starts at 0 but the algorithm works with area so we need to start from 1 not 0 + i = float64(start + 1) + ) + + for _, stage := range varc.Stages { + to = float64(stage.Target.ValueOrZero()) / timeUnit + dur = float64(stage.Duration.Duration) + if from != to { // ramp up/down + endCount += dur * ((to-from)/2 + from) + for ; i <= endCount; i += float64(next()) { + // TODO: try to twist this in a way to be able to get i (the only changing part) + // somewhere where it is less in the middle of the equation + x := (from*dur - math.Sqrt(dur*(from*from*dur+2*(i-doneSoFar)*(to-from)))) / (from - to) + + ch <- time.Duration(x) + stageStart + } + } else { + endCount += dur * to + for ; i <= endCount; i += float64(next()) { + ch <- time.Duration((i-doneSoFar)/to) + stageStart + } + } + doneSoFar = endCount + from = to + stageStart += time.Duration(stage.Duration.Duration) + } +} + +// Run executes a variable number of iterations per second. 
+//
+// TODO: Split this up and make an independent component that can be reused
+// between the constant and ramping arrival rate executors - that way we can
+// keep the complexity in one well-architected part (with short methods and few
+// lambdas :D), while having both config frontends still be present for maximum
+// UX benefits. Basically, keep the progress bars and scheduling (i.e. at what
+// time should iteration X begin) different, but keep everything else the same.
+// This will allow us to implement https://github.com/loadimpact/k6/issues/1386
+// and things like all of the TODOs below in one place only.
+//nolint:funlen,gocognit
+func (varr RampingArrivalRate) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) {
+	segment := varr.executionState.ExecutionTuple.Segment
+	gracefulStop := varr.config.GetGracefulStop()
+	duration := sumStagesDuration(varr.config.Stages)
+	preAllocatedVUs := varr.config.GetPreAllocatedVUs(varr.executionState.ExecutionTuple)
+	maxVUs := varr.config.GetMaxVUs(varr.executionState.ExecutionTuple)
+
+	// TODO: refactor and simplify
+	timeUnit := time.Duration(varr.config.TimeUnit.Duration)
+	startArrivalRate := getScaledArrivalRate(segment, varr.config.StartRate.Int64, timeUnit)
+	maxUnscaledRate := getStagesUnscaledMaxTarget(varr.config.StartRate.Int64, varr.config.Stages)
+	maxArrivalRatePerSec, _ := getArrivalRatePerSec(getScaledArrivalRate(segment, maxUnscaledRate, timeUnit)).Float64()
+	startTickerPeriod := getTickerPeriod(startArrivalRate)
+
+	// Make sure the log and the progress bar have accurate information
+	varr.logger.WithFields(logrus.Fields{
+		"maxVUs": maxVUs, "preAllocatedVUs": preAllocatedVUs, "duration": duration, "numStages": len(varr.config.Stages),
+		"startTickerPeriod": startTickerPeriod.Duration, "type": varr.config.GetType(),
+	}).Debug("Starting executor run...")
+
+	activeVUsWg := &sync.WaitGroup{}
+
+	returnedVUs := make(chan struct{})
+	startTime, maxDurationCtx,
regDurationCtx, cancel := getDurationContexts(parentCtx, duration, gracefulStop) + + defer func() { + // Make sure all VUs aren't executing iterations anymore, for the cancel() + // below to deactivate them. + <-returnedVUs + cancel() + activeVUsWg.Wait() + }() + activeVUs := make(chan lib.ActiveVU, maxVUs) + activeVUsCount := uint64(0) + + activationParams := getVUActivationParams(maxDurationCtx, varr.config.BaseConfig, + func(u lib.InitializedVU) { + varr.executionState.ReturnVU(u, true) + activeVUsWg.Done() + }) + activateVU := func(initVU lib.InitializedVU) lib.ActiveVU { + activeVUsWg.Add(1) + activeVU := initVU.Activate(activationParams) + varr.executionState.ModCurrentlyActiveVUsCount(+1) + atomic.AddUint64(&activeVUsCount, 1) + return activeVU + } + + remainingUnplannedVUs := maxVUs - preAllocatedVUs + makeUnplannedVUCh := make(chan struct{}) + defer close(makeUnplannedVUCh) + go func() { + defer close(returnedVUs) + defer func() { + // this is done here as to not have an unplannedVU in the middle of initialization when + // starting to return activeVUs + for i := uint64(0); i < atomic.LoadUint64(&activeVUsCount); i++ { + <-activeVUs + } + }() + for range makeUnplannedVUCh { + varr.logger.Debug("Starting initialization of an unplanned VU...") + initVU, err := varr.executionState.GetUnplannedVU(maxDurationCtx, varr.logger) + if err != nil { + // TODO figure out how to return it to the Run goroutine + varr.logger.WithError(err).Error("Error while allocating unplanned VU") + } else { + varr.logger.Debug("The unplanned VU finished initializing successfully!") + activeVUs <- activateVU(initVU) + } + } + }() + + // Get the pre-allocated VUs in the local buffer + for i := int64(0); i < preAllocatedVUs; i++ { + initVU, err := varr.executionState.GetPlannedVU(varr.logger, false) + if err != nil { + return err + } + activeVUs <- activateVU(initVU) + } + + tickerPeriod := int64(startTickerPeriod.Duration) + + vusFmt := pb.GetFixedLengthIntFormat(maxVUs) + itersFmt := 
pb.GetFixedLengthFloatFormat(maxArrivalRatePerSec, 0) + " iters/s" + + progressFn := func() (float64, []string) { + currActiveVUs := atomic.LoadUint64(&activeVUsCount) + currentTickerPeriod := atomic.LoadInt64(&tickerPeriod) + vusInBuffer := uint64(len(activeVUs)) + progVUs := fmt.Sprintf(vusFmt+"/"+vusFmt+" VUs", + currActiveVUs-vusInBuffer, currActiveVUs) + + itersPerSec := 0.0 + if currentTickerPeriod > 0 { + itersPerSec = float64(time.Second) / float64(currentTickerPeriod) + } + progIters := fmt.Sprintf(itersFmt, itersPerSec) + + right := []string{progVUs, duration.String(), progIters} + + spent := time.Since(startTime) + if spent > duration { + return 1, right + } + + spentDuration := pb.GetFixedLengthDuration(spent, duration) + progDur := fmt.Sprintf("%s/%s", spentDuration, duration) + right[1] = progDur + + return math.Min(1, float64(spent)/float64(duration)), right + } + + varr.progress.Modify(pb.WithProgress(progressFn)) + go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, varr, progressFn) + + regDurationDone := regDurationCtx.Done() + runIterationBasic := getIterationRunner(varr.executionState, varr.logger) + runIteration := func(vu lib.ActiveVU) { + runIterationBasic(maxDurationCtx, vu) + activeVUs <- vu + } + + timer := time.NewTimer(time.Hour) + start := time.Now() + ch := make(chan time.Duration, 10) // buffer 10 iteration times ahead + var prevTime time.Duration + shownWarning := false + metricTags := varr.getMetricTags(nil) + go varr.config.cal(varr.executionState.ExecutionTuple, ch) + for nextTime := range ch { + select { + case <-regDurationDone: + return nil + default: + } + atomic.StoreInt64(&tickerPeriod, int64(nextTime-prevTime)) + prevTime = nextTime + b := time.Until(start.Add(nextTime)) + if b > 0 { // TODO: have a minimal ? 
+ timer.Reset(b) + select { + case <-timer.C: + case <-regDurationDone: + return nil + } + } + + select { + case vu := <-activeVUs: // ideally, we get the VU from the buffer without any issues + go runIteration(vu) //TODO: refactor so we dont spin up a goroutine for each iteration + continue + default: // no free VUs currently available + } + // Since there aren't any free VUs available, consider this iteration + // dropped - we aren't going to try to recover it, but + + stats.PushIfNotDone(parentCtx, out, stats.Sample{ + Value: 1, Metric: metrics.DroppedIterations, + Tags: metricTags, Time: time.Now(), + }) + + // We'll try to start allocating another VU in the background, + // non-blockingly, if we have remainingUnplannedVUs... + if remainingUnplannedVUs == 0 { + if !shownWarning { + varr.logger.Warningf("Insufficient VUs, reached %d active VUs and cannot initialize more", maxVUs) + shownWarning = true + } + continue + } + + select { + case makeUnplannedVUCh <- struct{}{}: // great! + remainingUnplannedVUs-- + default: // we're already allocating a new VU + } + } + return nil +} diff --git a/lib/executor/ramping_arrival_rate_test.go b/lib/executor/ramping_arrival_rate_test.go new file mode 100644 index 00000000000..0b830ef62a1 --- /dev/null +++ b/lib/executor/ramping_arrival_rate_test.go @@ -0,0 +1,581 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package executor + +import ( + "context" + "fmt" + "math/big" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/metrics" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" +) + +func getTestRampingArrivalRateConfig() *RampingArrivalRateConfig { + return &RampingArrivalRateConfig{ + BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(1 * time.Second)}, + TimeUnit: types.NullDurationFrom(time.Second), + StartRate: null.IntFrom(10), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(time.Second * 1), + Target: null.IntFrom(10), + }, + { + Duration: types.NullDurationFrom(time.Second * 1), + Target: null.IntFrom(50), + }, + { + Duration: types.NullDurationFrom(time.Second * 1), + Target: null.IntFrom(50), + }, + }, + PreAllocatedVUs: null.IntFrom(10), + MaxVUs: null.IntFrom(20), + } +} + +func TestRampingArrivalRateRunNotEnoughAllocatedVUsWarn(t *testing.T) { + t.Parallel() + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, logHook := setupExecutor( + t, getTestRampingArrivalRateConfig(), es, + simpleRunner(func(ctx context.Context) error { + time.Sleep(time.Second) + return nil + }), + ) + defer cancel() + engineOut := make(chan stats.SampleContainer, 1000) + err = executor.Run(ctx, engineOut) + require.NoError(t, err) + entries := logHook.Drain() + require.NotEmpty(t, entries) + for _, entry := range entries { + require.Equal(t, + "Insufficient VUs, reached 20 active VUs and cannot initialize more", + entry.Message) + require.Equal(t, logrus.WarnLevel, entry.Level) + } +} + +func 
TestRampingArrivalRateRunCorrectRate(t *testing.T) { + t.Parallel() + var count int64 + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, logHook := setupExecutor( + t, getTestRampingArrivalRateConfig(), es, + simpleRunner(func(ctx context.Context) error { + atomic.AddInt64(&count, 1) + return nil + }), + ) + defer cancel() + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + // check that we got around the amount of VU iterations as we would expect + var currentCount int64 + + time.Sleep(time.Second) + currentCount = atomic.SwapInt64(&count, 0) + assert.InDelta(t, 10, currentCount, 1) + + time.Sleep(time.Second) + currentCount = atomic.SwapInt64(&count, 0) + assert.InDelta(t, 30, currentCount, 2) + + time.Sleep(time.Second) + currentCount = atomic.SwapInt64(&count, 0) + assert.InDelta(t, 50, currentCount, 2) + }() + engineOut := make(chan stats.SampleContainer, 1000) + err = executor.Run(ctx, engineOut) + wg.Wait() + require.NoError(t, err) + require.Empty(t, logHook.Drain()) +} + +func TestRampingArrivalRateRunUnplannedVUs(t *testing.T) { + t.Parallel() + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 1, 3) + var count int64 + var ch = make(chan struct{}) // closed when new unplannedVU is started and signal to get to next iterations + var ch2 = make(chan struct{}) // closed when a second iteration was started on an old VU in order to test it won't start a second unplanned VU in parallel or at all + runner := simpleRunner(func(ctx context.Context) error { + cur := atomic.AddInt64(&count, 1) + if cur == 1 { + <-ch // wait to start again + } else if cur == 2 { + <-ch2 // wait to start again + } + + return nil + }) + var ctx, cancel, executor, logHook = setupExecutor( + t, &RampingArrivalRateConfig{ + TimeUnit: types.NullDurationFrom(time.Second), + Stages: []Stage{ + { + // the 
minus one makes it so only 9 iterations will be started instead of 10 + // as the 10th happens to be just at the end and sometimes doesn't get executed :( + Duration: types.NullDurationFrom(time.Second*2 - 1), + Target: null.IntFrom(10), + }, + }, + PreAllocatedVUs: null.IntFrom(1), + MaxVUs: null.IntFrom(3), + }, + es, runner) + defer cancel() + var engineOut = make(chan stats.SampleContainer, 1000) + es.SetInitVUFunc(func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { + cur := atomic.LoadInt64(&count) + require.Equal(t, cur, int64(1)) + time.Sleep(time.Second / 2) + + close(ch) + time.Sleep(time.Millisecond * 50) + + cur = atomic.LoadInt64(&count) + require.Equal(t, cur, int64(2)) + + time.Sleep(time.Millisecond * 50) + cur = atomic.LoadInt64(&count) + require.Equal(t, cur, int64(2)) + + close(ch2) + time.Sleep(time.Millisecond * 100) + cur = atomic.LoadInt64(&count) + require.NotEqual(t, cur, int64(2)) + return runner.NewVU(int64(es.GetUniqueVUIdentifier()), engineOut) + }) + err = executor.Run(ctx, engineOut) + assert.NoError(t, err) + assert.Empty(t, logHook.Drain()) + + droppedIters := sumMetricValues(engineOut, metrics.DroppedIterations.Name) + assert.Equal(t, count+int64(droppedIters), int64(9)) +} + +func TestRampingArrivalRateRunCorrectRateWithSlowRate(t *testing.T) { + t.Parallel() + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 1, 3) + var count int64 + var ch = make(chan struct{}) // closed when new unplannedVU is started and signal to get to next iterations + runner := simpleRunner(func(ctx context.Context) error { + cur := atomic.AddInt64(&count, 1) + if cur == 1 { + <-ch // wait to start again + } + + return nil + }) + var ctx, cancel, executor, logHook = setupExecutor( + t, &RampingArrivalRateConfig{ + TimeUnit: types.NullDurationFrom(time.Second), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(time.Second * 2), + Target: null.IntFrom(10), + }, 
+ }, + PreAllocatedVUs: null.IntFrom(1), + MaxVUs: null.IntFrom(3), + }, + es, runner) + defer cancel() + var engineOut = make(chan stats.SampleContainer, 1000) + es.SetInitVUFunc(func(_ context.Context, logger *logrus.Entry) (lib.InitializedVU, error) { + t.Log("init") + cur := atomic.LoadInt64(&count) + require.Equal(t, cur, int64(1)) + time.Sleep(time.Millisecond * 200) + close(ch) + time.Sleep(time.Millisecond * 200) + cur = atomic.LoadInt64(&count) + require.NotEqual(t, cur, int64(1)) + + return runner.NewVU(int64(es.GetUniqueVUIdentifier()), engineOut) + }) + err = executor.Run(ctx, engineOut) + assert.NoError(t, err) + assert.Empty(t, logHook.Drain()) + assert.Equal(t, int64(0), es.GetCurrentlyActiveVUsCount()) + assert.Equal(t, int64(2), es.GetInitializedVUsCount()) +} + +func mustNewExecutionTuple(seg *lib.ExecutionSegment, seq *lib.ExecutionSegmentSequence) *lib.ExecutionTuple { + et, err := lib.NewExecutionTuple(seg, seq) + if err != nil { + panic(err) + } + return et +} + +func TestRampingArrivalRateCal(t *testing.T) { + t.Parallel() + + var ( + defaultTimeUnit = time.Second + config = RampingArrivalRateConfig{ + StartRate: null.IntFrom(0), + Stages: []Stage{ // TODO make this even bigger and longer .. 
will need more time + { + Duration: types.NullDurationFrom(time.Second * 5), + Target: null.IntFrom(1), + }, + { + Duration: types.NullDurationFrom(time.Second * 1), + Target: null.IntFrom(1), + }, + { + Duration: types.NullDurationFrom(time.Second * 5), + Target: null.IntFrom(0), + }, + }, + } + ) + + testCases := []struct { + expectedTimes []time.Duration + et *lib.ExecutionTuple + timeUnit time.Duration + }{ + { + expectedTimes: []time.Duration{time.Millisecond * 3162, time.Millisecond * 4472, time.Millisecond * 5500, time.Millisecond * 6527, time.Millisecond * 7837, time.Second * 11}, + et: mustNewExecutionTuple(nil, nil), + }, + { + expectedTimes: []time.Duration{time.Millisecond * 4472, time.Millisecond * 7837}, + et: mustNewExecutionTuple(newExecutionSegmentFromString("0:1/3"), nil), + }, + { + expectedTimes: []time.Duration{time.Millisecond * 4472, time.Millisecond * 7837}, + et: mustNewExecutionTuple(newExecutionSegmentFromString("0:1/3"), newExecutionSegmentSequenceFromString("0,1/3,1")), + }, + { + expectedTimes: []time.Duration{time.Millisecond * 4472, time.Millisecond * 7837}, + et: mustNewExecutionTuple(newExecutionSegmentFromString("1/3:2/3"), nil), + }, + { + expectedTimes: []time.Duration{time.Millisecond * 4472, time.Millisecond * 7837}, + et: mustNewExecutionTuple(newExecutionSegmentFromString("2/3:1"), nil), + }, + { + expectedTimes: []time.Duration{time.Millisecond * 3162, time.Millisecond * 6527}, + et: mustNewExecutionTuple(newExecutionSegmentFromString("0:1/3"), newExecutionSegmentSequenceFromString("0,1/3,2/3,1")), + }, + { + expectedTimes: []time.Duration{time.Millisecond * 4472, time.Millisecond * 7837}, + et: mustNewExecutionTuple(newExecutionSegmentFromString("1/3:2/3"), newExecutionSegmentSequenceFromString("0,1/3,2/3,1")), + }, + { + expectedTimes: []time.Duration{time.Millisecond * 5500, time.Millisecond * 11000}, + et: mustNewExecutionTuple(newExecutionSegmentFromString("2/3:1"), 
newExecutionSegmentSequenceFromString("0,1/3,2/3,1")), + }, + { + expectedTimes: []time.Duration{ + time.Millisecond * 1825, time.Millisecond * 2581, time.Millisecond * 3162, time.Millisecond * 3651, time.Millisecond * 4082, time.Millisecond * 4472, + time.Millisecond * 4830, time.Millisecond * 5166, time.Millisecond * 5499, time.Millisecond * 5833, time.Millisecond * 6169, time.Millisecond * 6527, + time.Millisecond * 6917, time.Millisecond * 7348, time.Millisecond * 7837, time.Millisecond * 8418, time.Millisecond * 9174, time.Millisecond * 10999, + }, + et: mustNewExecutionTuple(nil, nil), + timeUnit: time.Second / 3, // three times as fast + }, + // TODO: extend more + } + + for _, testCase := range testCases { + et := testCase.et + expectedTimes := testCase.expectedTimes + config.TimeUnit = types.NewNullDuration(testCase.timeUnit, true) + if testCase.timeUnit == 0 { + config.TimeUnit = types.NewNullDuration(defaultTimeUnit, true) + } + + t.Run(fmt.Sprintf("%s timeunit %s", et, config.TimeUnit), func(t *testing.T) { + ch := make(chan time.Duration) + go config.cal(et, ch) + changes := make([]time.Duration, 0, len(expectedTimes)) + for c := range ch { + changes = append(changes, c) + } + assert.Equal(t, len(expectedTimes), len(changes)) + for i, expectedTime := range expectedTimes { + require.True(t, i < len(changes)) + change := changes[i] + assert.InEpsilon(t, expectedTime, change, 0.001, "%s %s", expectedTime, change) + } + }) + } +} + +func BenchmarkCal(b *testing.B) { + for _, t := range []time.Duration{ + time.Second, time.Minute, + } { + t := t + b.Run(t.String(), func(b *testing.B) { + config := RampingArrivalRateConfig{ + TimeUnit: types.NullDurationFrom(time.Second), + StartRate: null.IntFrom(50), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(t), + Target: null.IntFrom(49), + }, + { + Duration: types.NullDurationFrom(t), + Target: null.IntFrom(50), + }, + }, + } + et := mustNewExecutionTuple(nil, nil) + + b.ResetTimer() + 
b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + ch := make(chan time.Duration, 20) + go config.cal(et, ch) + for c := range ch { + _ = c + } + } + }) + }) + } +} + +func BenchmarkCalRat(b *testing.B) { + for _, t := range []time.Duration{ + time.Second, time.Minute, + } { + t := t + b.Run(t.String(), func(b *testing.B) { + config := RampingArrivalRateConfig{ + TimeUnit: types.NullDurationFrom(time.Second), + StartRate: null.IntFrom(50), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(t), + Target: null.IntFrom(49), + }, + { + Duration: types.NullDurationFrom(t), + Target: null.IntFrom(50), + }, + }, + } + et := mustNewExecutionTuple(nil, nil) + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + ch := make(chan time.Duration, 20) + go config.calRat(et, ch) + for c := range ch { + _ = c + } + } + }) + }) + } +} + +func TestCompareCalImplementation(t *testing.T) { + t.Parallel() + // This test checks that the cal and calRat implementation get roughly similar numbers + // in my experiment the difference is 1(nanosecond) in 7 case for the whole test + // the duration is 1 second for each stage as calRat takes way longer - a longer better test can + // be done when/if it's performance is improved + config := RampingArrivalRateConfig{ + TimeUnit: types.NullDurationFrom(time.Second), + StartRate: null.IntFrom(0), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(200), + }, + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(200), + }, + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(2000), + }, + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(2000), + }, + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(300), + }, + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(300), + }, + { + Duration: types.NullDurationFrom(1 * time.Second), + 
Target: null.IntFrom(1333), + }, + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(1334), + }, + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(1334), + }, + }, + } + + et := mustNewExecutionTuple(nil, nil) + chRat := make(chan time.Duration, 20) + ch := make(chan time.Duration, 20) + go config.calRat(et, chRat) + go config.cal(et, ch) + count := 0 + var diff int + for c := range ch { + count++ + cRat := <-chRat + if !assert.InDelta(t, c, cRat, 1, "%d", count) { + diff++ + } + } + require.Equal(t, 0, diff) +} + +// calRat code here is just to check how accurate the cal implemenattion is +// there are no other tests for it so it depends on the test of cal that it is actually accurate :D + +//nolint:gochecknoglobals +var two = big.NewRat(2, 1) + +// from https://groups.google.com/forum/#!topic/golang-nuts/aIcDf8T-Png +func sqrtRat(x *big.Rat) *big.Rat { + var z, a, b big.Rat + var ns, ds big.Int + ni, di := x.Num(), x.Denom() + z.SetFrac(ns.Rsh(ni, uint(ni.BitLen())/2), ds.Rsh(di, uint(di.BitLen())/2)) + for i := 10; i > 0; i-- { // TODO: better termination + a.Sub(a.Mul(&z, &z), x) + f, _ := a.Float64() + if f == 0 { + break + } + // fmt.Println(x, z, i) + z.Sub(&z, b.Quo(&a, b.Mul(two, &z))) + } + return &z +} + +// This implementation is just for reference and accuracy testing +func (varc RampingArrivalRateConfig) calRat(et *lib.ExecutionTuple, ch chan<- time.Duration) { + defer close(ch) + + start, offsets, _ := et.GetStripedOffsets() + li := -1 + next := func() int64 { + li++ + return offsets[li%len(offsets)] + } + iRat := big.NewRat(start+1, 1) + + carry := big.NewRat(0, 1) + doneSoFar := big.NewRat(0, 1) + endCount := big.NewRat(0, 1) + curr := varc.StartRate.ValueOrZero() + var base time.Duration + for _, stage := range varc.Stages { + target := stage.Target.ValueOrZero() + if target != curr { + var ( + from = big.NewRat(curr, int64(time.Second)) + to = big.NewRat(target, int64(time.Second)) + dur = 
big.NewRat(time.Duration(stage.Duration.Duration).Nanoseconds(), 1) + ) + // precalcualations :) + toMinusFrom := new(big.Rat).Sub(to, from) + fromSquare := new(big.Rat).Mul(from, from) + durMulSquare := new(big.Rat).Mul(dur, fromSquare) + fromMulDur := new(big.Rat).Mul(from, dur) + oneOverToMinusFrom := new(big.Rat).Inv(toMinusFrom) + + endCount.Add(endCount, + new(big.Rat).Mul( + dur, + new(big.Rat).Add(new(big.Rat).Mul(toMinusFrom, big.NewRat(1, 2)), from))) + for ; endCount.Cmp(iRat) >= 0; iRat.Add(iRat, big.NewRat(next(), 1)) { + // even with all of this optimizations sqrtRat is taking so long this is still + // extremely slow ... :( + buf := new(big.Rat).Sub(iRat, doneSoFar) + buf.Mul(buf, two) + buf.Mul(buf, toMinusFrom) + buf.Add(buf, durMulSquare) + buf.Mul(buf, dur) + buf.Sub(fromMulDur, sqrtRat(buf)) + buf.Mul(buf, oneOverToMinusFrom) + + r, _ := buf.Float64() + ch <- base + time.Duration(-r) // the minus is because we don't deive by from-to but by to-from above + } + } else { + step := big.NewRat(int64(time.Second), target) + first := big.NewRat(0, 1) + first.Sub(first, carry) + endCount.Add(endCount, new(big.Rat).Mul(big.NewRat(target, 1), big.NewRat(time.Duration(stage.Duration.Duration).Nanoseconds(), time.Duration(varc.TimeUnit.Duration).Nanoseconds()))) + + for ; endCount.Cmp(iRat) >= 0; iRat.Add(iRat, big.NewRat(next(), 1)) { + res := new(big.Rat).Sub(iRat, doneSoFar) // this can get next added to it but will need to change the above for .. 
so + r, _ := res.Mul(res, step).Float64() + ch <- base + time.Duration(r) + first.Add(first, step) + } + } + doneSoFar.Set(endCount) // copy + curr = target + base += time.Duration(stage.Duration.Duration) + } +} diff --git a/lib/executor/ramping_vus.go b/lib/executor/ramping_vus.go new file mode 100644 index 00000000000..08d4a2e1bd7 --- /dev/null +++ b/lib/executor/ramping_vus.go @@ -0,0 +1,692 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package executor + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/sirupsen/logrus" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" + "github.com/loadimpact/k6/ui/pb" +) + +const rampingVUsType = "ramping-vus" + +func init() { + lib.RegisterExecutorConfigType( + rampingVUsType, + func(name string, rawJSON []byte) (lib.ExecutorConfig, error) { + config := NewRampingVUsConfig(name) + err := lib.StrictJSONUnmarshal(rawJSON, &config) + return config, err + }, + ) +} + +// Stage contains +type Stage struct { + Duration types.NullDuration `json:"duration"` + Target null.Int `json:"target"` // TODO: maybe rename this to endVUs? something else? + // TODO: add a progression function? 
+} + +// RampingVUsConfig stores the configuration for the stages executor +type RampingVUsConfig struct { + BaseConfig + StartVUs null.Int `json:"startVUs"` + Stages []Stage `json:"stages"` + GracefulRampDown types.NullDuration `json:"gracefulRampDown"` +} + +// NewRampingVUsConfig returns a RampingVUsConfig with its default values +func NewRampingVUsConfig(name string) RampingVUsConfig { + return RampingVUsConfig{ + BaseConfig: NewBaseConfig(name, rampingVUsType), + StartVUs: null.NewInt(1, false), + GracefulRampDown: types.NewNullDuration(30*time.Second, false), + } +} + +// Make sure we implement the lib.ExecutorConfig interface +var _ lib.ExecutorConfig = &RampingVUsConfig{} + +// GetStartVUs is just a helper method that returns the scaled starting VUs. +func (vlvc RampingVUsConfig) GetStartVUs(et *lib.ExecutionTuple) int64 { + return et.ScaleInt64(vlvc.StartVUs.Int64) +} + +// GetGracefulRampDown is just a helper method that returns the graceful +// ramp-down period as a standard Go time.Duration value... +func (vlvc RampingVUsConfig) GetGracefulRampDown() time.Duration { + return time.Duration(vlvc.GracefulRampDown.Duration) +} + +// GetDescription returns a human-readable description of the executor options +func (vlvc RampingVUsConfig) GetDescription(et *lib.ExecutionTuple) string { + maxVUs := et.ScaleInt64(getStagesUnscaledMaxTarget(vlvc.StartVUs.Int64, vlvc.Stages)) + return fmt.Sprintf("Up to %d looping VUs for %s over %d stages%s", + maxVUs, sumStagesDuration(vlvc.Stages), len(vlvc.Stages), + vlvc.getBaseInfo(fmt.Sprintf("gracefulRampDown: %s", vlvc.GetGracefulRampDown()))) +} + +// Validate makes sure all options are configured and valid +func (vlvc RampingVUsConfig) Validate() []error { + errors := vlvc.BaseConfig.Validate() + if vlvc.StartVUs.Int64 < 0 { + errors = append(errors, fmt.Errorf("the number of start VUs shouldn't be negative")) + } + + return append(errors, validateStages(vlvc.Stages)...) 
+} + +// getRawExecutionSteps calculates and returns as execution steps the number of +// actively running VUs the executor should have at every moment. +// +// It doesn't take into account graceful ramp-downs. It also doesn't deal with +// the end-of-executor drop to 0 VUs, whether graceful or not. These are +// handled by GetExecutionRequirements(), which internally uses this method and +// reserveVUsForGracefulRampDowns(). +// +// The zeroEnd argument tells the method if we should artificially add a step +// with 0 VUs at offset sum(stages.duration), i.e. when the executor is +// supposed to end. +// +// It's also important to note how scaling works. Say, we ramp up from 0 to 10 +// VUs over 10 seconds and then back to 0, and we want to split the execution in +// 2 equal segments (i.e. execution segments "0:0.5" and "0.5:1"). The original +// execution steps would look something like this: +// +// VUs ^ +// 10| * +// 9| *** +// 8| ***** +// 7| ******* +// 6| ********* +// 5| *********** +// 4| ************* +// 3| *************** +// 2| ***************** +// 1| ******************* +// 0------------------------> time(s) +// 01234567890123456789012 (t%10) +// 00000000001111111111222 (t/10) +// +// The chart for one of the execution segments would look like this: +// +// VUs ^ +// 5| XXX +// 4| XXXXXXX +// 3| XXXXXXXXXXX +// 2| XXXXXXXXXXXXXXX +// 1| XXXXXXXXXXXXXXXXXXX +// 0------------------------> time(s) +// 01234567890123456789012 (t%10) +// 00000000001111111111222 (t/10) +// +// And the chart for the other execution segment would look like this: +// +// VUs ^ +// 5| Y +// 4| YYYYY +// 3| YYYYYYYYY +// 2| YYYYYYYYYYYYY +// 1| YYYYYYYYYYYYYYYYY +// 0------------------------> time(s) +// 01234567890123456789012 (t%10) +// 00000000001111111111222 (t/10) +// +// Notice the time offsets and the slower ramping up and down. 
All of that is +// because the sum of the two execution segments has to produce exactly the +// original shape, as if the test ran on a single machine: +// +// VUs ^ +// 10| Y +// 9| YYY +// 8| YYYYY +// 7| YYYYYYY +// 6| YYYYYYYYY +// 5| YYYYXXXYYYY +// 4| YYYXXXXXXXYYY +// 3| YYXXXXXXXXXXXYY +// 2| YXXXXXXXXXXXXXXXY +// 1| XXXXXXXXXXXXXXXXXXX +// 0------------------------> time(s) +// 01234567890123456789012 (t%10) +// 00000000001111111111222 (t/10) +// +// More information: https://github.com/loadimpact/k6/issues/997#issuecomment-484416866 +func (vlvc RampingVUsConfig) getRawExecutionSteps(et *lib.ExecutionTuple, zeroEnd bool) []lib.ExecutionStep { + var ( + timeTillEnd time.Duration + fromVUs = vlvc.StartVUs.Int64 + start, offsets, lcd = et.GetStripedOffsets() + steps = make([]lib.ExecutionStep, 0, vlvc.precalculateTheRequiredSteps(et, zeroEnd)) + index = segmentedIndex{start: start, lcd: lcd, offsets: offsets} + ) + + // Reserve the scaled StartVUs at the beginning + steps = append(steps, lib.ExecutionStep{TimeOffset: 0, PlannedVUs: uint64(index.goTo(fromVUs))}) + addStep := func(timeOffset time.Duration, plannedVUs uint64) { + if steps[len(steps)-1].PlannedVUs != plannedVUs { + steps = append(steps, lib.ExecutionStep{TimeOffset: timeOffset, PlannedVUs: plannedVUs}) + } + } + + for _, stage := range vlvc.Stages { + stageEndVUs := stage.Target.Int64 + stageDuration := time.Duration(stage.Duration.Duration) + timeTillEnd += stageDuration + + stageVUDiff := stageEndVUs - fromVUs + if stageVUDiff == 0 { + continue + } + if stageDuration == 0 { + addStep(timeTillEnd, uint64(index.goTo(stageEndVUs))) + fromVUs = stageEndVUs + continue + } + + // VU reservation for gracefully ramping down is handled as a + // separate method: reserveVUsForGracefulRampDowns() + if index.unscaled > stageEndVUs { // ramp down + // here we don't want to emit for the equal to stageEndVUs as it doesn't go below it + // it will just go to it + for ; index.unscaled > stageEndVUs; 
index.prev() { + addStep( + // this is the time that we should go up 1 if we are ramping up + // but we are ramping down so we should go 1 down, but because we want to not + // stop VUs immediately we stop it on the next unscaled VU's time + timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-index.unscaled+1)/stageVUDiff), + uint64(index.scaled-1), + ) + } + } else { + for ; index.unscaled <= stageEndVUs; index.next() { + addStep( + timeTillEnd-time.Duration(int64(stageDuration)*(stageEndVUs-index.unscaled)/stageVUDiff), + uint64(index.scaled), + ) + } + } + fromVUs = stageEndVUs + } + + if zeroEnd && steps[len(steps)-1].PlannedVUs != 0 { + // If the last PlannedVUs value wasn't 0, add a last step with 0 + steps = append(steps, lib.ExecutionStep{TimeOffset: timeTillEnd, PlannedVUs: 0}) + } + return steps +} + +type segmentedIndex struct { // TODO: rename ... although this is probably the best name so far :D + start, lcd int64 + offsets []int64 + scaled, unscaled int64 // for both the first element(vu) is 1 not 0 +} + +// goes to the next scaled index and move the unscaled one accordingly +func (s *segmentedIndex) next() { + if s.scaled == 0 { // the 1 element(VU) is at the start + s.unscaled += s.start + 1 // the first element of the start 0, but the here we need it to be 1 so we add 1 + } else { // if we are not at the first element we need to go through the offsets, looping over them + s.unscaled += s.offsets[int(s.scaled-1)%len(s.offsets)] // slice's index start at 0 ours start at 1 + } + s.scaled++ +} + +// prev goest to the previous scaled value and sets the unscaled one accordingly +// calling prev when s.scaled == 0 is undefined +func (s *segmentedIndex) prev() { + if s.scaled == 1 { // we are the first need to go to the 0th element which means we need to remove the start + s.unscaled -= s.start + 1 // this could've been just settign to 0 + } else { // not at the first element - need to get the previously added offset so + s.unscaled -= 
s.offsets[int(s.scaled-2)%len(s.offsets)] // slice's index start 0 our start at 1 + } + s.scaled-- +} + +// goTo sets the scaled index to it's biggest value for which the corresponding unscaled index is +// is smaller or equal to value +func (s *segmentedIndex) goTo(value int64) int64 { // TODO optimize + var gi int64 + // Because of the cyclical nature of the striping algorithm (with a cycle + // length of LCD, the least common denominator), when scaling large values + // (i.e. many multiples of the LCD), we can quickly calculate how many times + // the cycle repeats. + wholeCycles := (value / s.lcd) + // So we can set some approximate initial values quickly, since we also know + // precisely how many scaled values there are per cycle length. + s.scaled = wholeCycles * int64(len(s.offsets)) + s.unscaled = wholeCycles*s.lcd + s.start + 1 // our indexes are from 1 the start is from 0 + // Approach the final value using the slow algorithm with the step by step loop + // TODO: this can be optimized by another array with size offsets that instead of the offsets + // from the previous is the offset from either 0 or start + i := s.start + for ; i < value%s.lcd; gi, i = gi+1, i+s.offsets[gi] { + s.scaled++ + s.unscaled += s.offsets[gi] + } + + if gi > 0 { // there were more values after the wholecycles + // the last offset actually shouldn't have been added + s.unscaled -= s.offsets[gi-1] + } else if s.scaled > 0 { // we didn't actually have more values after the wholecycles but we still had some + // in this case the unscaled value needs to move back by the last offset as it would've been + // the one to get it from the value it needs to be to it's current one + s.unscaled -= s.offsets[len(s.offsets)-1] + } + + if s.scaled == 0 { + s.unscaled = 0 // we would've added the start and 1 + } + + return s.scaled +} + +func absInt64(a int64) int64 { + if a < 0 { + return -a + } + return a +} + +func (vlvc RampingVUsConfig) precalculateTheRequiredSteps(et *lib.ExecutionTuple, 
zeroEnd bool) int { + p := et.ScaleInt64(vlvc.StartVUs.Int64) + var result int64 + result++ // for the first one + + if zeroEnd { + result++ // for the last one - this one can be more then needed + } + for _, stage := range vlvc.Stages { + stageEndVUs := et.ScaleInt64(stage.Target.Int64) + if stage.Duration.Duration == 0 { + result++ + } else { + result += absInt64(p - stageEndVUs) + } + p = stageEndVUs + } + return int(result) +} + +// If the graceful ramp-downs are enabled, we need to reserve any VUs that may +// potentially have to finish running iterations when we're scaling their number +// down. This would prevent attempts from other executors to use them while the +// iterations are finishing up during their allotted gracefulRampDown periods. +// +// But we also need to be careful to not over-allocate more VUs than we actually +// need. We should never have more PlannedVUs than the max(startVUs, +// stage[n].target), even if we're quickly scaling VUs up and down multiple +// times, one after the other. In those cases, any previously reserved VUs +// finishing up interrupted iterations should be reused by the executor, +// instead of new ones being requested from the execution state. +// +// Here's an example with graceful ramp-down (i.e. "uninterruptible" +// iterations), where stars represent actively scheduled VUs and dots are used +// for VUs that are potentially finishing up iterations: +// +// +// ^ +// | +// VUs 6| *.............................. +// 5| ***.......*.............................. +// 4|*****.....***.....**.............................. +// 3|******...*****...***.............................. +// 2|*******.*******.****.............................. +// 1|***********************.............................. 
+// 0--------------------------------------------------------> time(s) +// 012345678901234567890123456789012345678901234567890123 (t%10) +// 000000000011111111112222222222333333333344444444445555 (t/10) +// +// We start with 4 VUs, scale to 6, scale down to 1, scale up to 5, scale down +// to 1 again, scale up to 4, back to 1, and finally back down to 0. If our +// gracefulStop timeout was 30s (the default), then we'll stay with 6 PlannedVUs +// until t=32 in the test above, and the actual executor could run until t=52. +// See TestRampingVUsConfigExecutionPlanExample() for the above example +// as a unit test. +// +// The algorithm we use below to reserve VUs so that ramping-down VUs can finish +// their last iterations is pretty simple. It just traverses the raw execution +// steps and whenever there's a scaling down of VUs, it prevents the number of +// VUs from decreasing for the configured gracefulRampDown period. +// +// Finishing up the test, i.e. making sure we have a step with 0 VUs at time +// executorEndOffset, is not handled here. Instead GetExecutionRequirements() +// takes care of that. But to make its job easier, this method won't add any +// steps with an offset that's greater or equal to executorEndOffset. +func (vlvc RampingVUsConfig) reserveVUsForGracefulRampDowns( //nolint:funlen + rawSteps []lib.ExecutionStep, executorEndOffset time.Duration, +) []lib.ExecutionStep { + rawStepsLen := len(rawSteps) + gracefulRampDownPeriod := vlvc.GetGracefulRampDown() + newSteps := []lib.ExecutionStep{} + + lastPlannedVUs := uint64(0) + for rawStepNum := 0; rawStepNum < rawStepsLen; rawStepNum++ { + rawStep := rawSteps[rawStepNum] + // Add the first step or any step where the number of planned VUs is + // greater than the ones in the previous step. We don't need to worry + // about reserving time for ramping-down VUs when the number of planned + // VUs is growing. 
That's because the gracefulRampDown period is a fixed + // value and any timeouts from early steps with fewer VUs will get + // overshadowed by timeouts from latter steps with more VUs. + if rawStepNum == 0 || rawStep.PlannedVUs > lastPlannedVUs { + newSteps = append(newSteps, rawStep) + lastPlannedVUs = rawStep.PlannedVUs + continue + } + + // We simply skip steps with the same number of planned VUs + if rawStep.PlannedVUs == lastPlannedVUs { + continue + } + + // If we're here, we have a downward "slope" - the lastPlannedVUs are + // more than the current rawStep's planned VUs. We're going to look + // forward in time (up to gracefulRampDown) and inspect the rawSteps. + // There are a 3 possibilities: + // - We find a new step within the gracefulRampDown period which has + // the same number of VUs or greater than lastPlannedVUs. Which + // means that we can just advance rawStepNum to that number and we + // don't need to worry about any of the raw steps in the middle! + // Both their planned VUs and their gracefulRampDown periods will + // be lower than what we're going to set from that new rawStep - + // we've basically found a new upward slope or equal value again. + // - We reach executorEndOffset, in which case we are done - we can't + // add any new steps, since those will be after the executor end + // offset. + // - We reach the end of the rawSteps, or we don't find any higher or + // equal steps to prevStep in the next gracefulRampDown period. So + // we'll simply try to add an entry into newSteps with the values + // {prevStep.TimeOffset + gracefulRampDown, rawStep.PlannedVUs} and + // we'll continue with traversing the following rawSteps. 
+ + skippedToNewRawStep := false + timeOffsetWithTimeout := rawStep.TimeOffset + gracefulRampDownPeriod + + for advStepNum := rawStepNum + 1; advStepNum < rawStepsLen; advStepNum++ { + advStep := rawSteps[advStepNum] + if advStep.TimeOffset > timeOffsetWithTimeout { + break + } + if advStep.PlannedVUs >= lastPlannedVUs { + rawStepNum = advStepNum - 1 + skippedToNewRawStep = true + break + } + } + + // Nothing more to do here, found a new "slope" with equal or greater + // PlannedVUs in the gracefulRampDownPeriod window, so we go to it. + if skippedToNewRawStep { + continue + } + + // We've reached the absolute executor end offset, and we were already + // on a downward "slope" (i.e. the previous planned VUs are more than + // the current planned VUs), so nothing more we can do here. + if timeOffsetWithTimeout >= executorEndOffset { + break + } + + newSteps = append(newSteps, lib.ExecutionStep{ + TimeOffset: timeOffsetWithTimeout, + PlannedVUs: rawStep.PlannedVUs, + }) + lastPlannedVUs = rawStep.PlannedVUs + } + + return newSteps +} + +// GetExecutionRequirements very dynamically reserves exactly the number of +// required VUs for this executor at every moment of the test. +// +// If gracefulRampDown is specified, it will also be taken into account, and the +// number of needed VUs to handle that will also be reserved. See the +// documentation of reserveVUsForGracefulRampDowns() for more details. +// +// On the other hand, gracefulStop is handled here. To facilitate it, we'll +// ensure that the last execution step will have 0 VUs and will be at time +// offset (sum(stages.Duration)+gracefulStop). Any steps that would've been +// added after it will be ignored. Thus: +// - gracefulStop can be less than gracefulRampDown and can cut the graceful +// ramp-down periods of the last VUs short. +// - gracefulRampDown can be more than gracefulStop: +// - If the user manually ramped down VUs at the end of the test (i.e. 
the +// last stage's target is 0), then this will have no effect. +// - If the last stage's target is more than 0, the VUs at the end of the +// executor's life will have more time to finish their last iterations. +func (vlvc RampingVUsConfig) GetExecutionRequirements(et *lib.ExecutionTuple) []lib.ExecutionStep { + steps := vlvc.getRawExecutionSteps(et, false) + + executorEndOffset := sumStagesDuration(vlvc.Stages) + time.Duration(vlvc.GracefulStop.Duration) + // Handle graceful ramp-downs, if we have them + if vlvc.GracefulRampDown.Duration > 0 { + steps = vlvc.reserveVUsForGracefulRampDowns(steps, executorEndOffset) + } + + // If the last PlannedVUs value wasn't 0, add a last step with 0 + if steps[len(steps)-1].PlannedVUs != 0 { + steps = append(steps, lib.ExecutionStep{TimeOffset: executorEndOffset, PlannedVUs: 0}) + } + + return steps +} + +// NewExecutor creates a new RampingVUs executor +func (vlvc RampingVUsConfig) NewExecutor(es *lib.ExecutionState, logger *logrus.Entry) (lib.Executor, error) { + return RampingVUs{ + BaseExecutor: NewBaseExecutor(vlvc, es, logger), + config: vlvc, + }, nil +} + +// HasWork reports whether there is any work to be done for the given execution segment. +func (vlvc RampingVUsConfig) HasWork(et *lib.ExecutionTuple) bool { + return lib.GetMaxPlannedVUs(vlvc.GetExecutionRequirements(et)) > 0 +} + +// RampingVUs handles the old "stages" execution configuration - it loops +// iterations with a variable number of VUs for the sum of all of the specified +// stages' duration. +type RampingVUs struct { + *BaseExecutor + config RampingVUsConfig +} + +// Make sure we implement the lib.Executor interface. +var _ lib.Executor = &RampingVUs{} + +// Run constantly loops through as many iterations as possible on a variable +// number of VUs for the specified stages. +// +// TODO: split up? 
since this does a ton of things, unfortunately I can't think +// of a less complex way to implement it (besides the old "increment by 100ms +// and see what happens)... :/ so maybe see how it can be split? +// nolint:funlen,gocognit +func (vlv RampingVUs) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) { + rawExecutionSteps := vlv.config.getRawExecutionSteps(vlv.executionState.ExecutionTuple, true) + regularDuration, isFinal := lib.GetEndOffset(rawExecutionSteps) + if !isFinal { + return fmt.Errorf("%s expected raw end offset at %s to be final", vlv.config.GetName(), regularDuration) + } + + gracefulExecutionSteps := vlv.config.GetExecutionRequirements(vlv.executionState.ExecutionTuple) + maxDuration, isFinal := lib.GetEndOffset(gracefulExecutionSteps) + if !isFinal { + return fmt.Errorf("%s expected graceful end offset at %s to be final", vlv.config.GetName(), maxDuration) + } + maxVUs := lib.GetMaxPlannedVUs(gracefulExecutionSteps) + gracefulStop := maxDuration - regularDuration + + startTime, maxDurationCtx, regDurationCtx, cancel := getDurationContexts(parentCtx, regularDuration, gracefulStop) + defer cancel() + + activeVUs := &sync.WaitGroup{} + defer activeVUs.Wait() + + // Make sure the log and the progress bar have accurate information + vlv.logger.WithFields(logrus.Fields{ + "type": vlv.config.GetType(), "startVUs": vlv.config.GetStartVUs(vlv.executionState.ExecutionTuple), "maxVUs": maxVUs, + "duration": regularDuration, "numStages": len(vlv.config.Stages), + }, + ).Debug("Starting executor run...") + + activeVUsCount := new(int64) + vusFmt := pb.GetFixedLengthIntFormat(int64(maxVUs)) + regularDurationStr := pb.GetFixedLengthDuration(regularDuration, regularDuration) + progressFn := func() (float64, []string) { + spent := time.Since(startTime) + currentlyActiveVUs := atomic.LoadInt64(activeVUsCount) + vus := fmt.Sprintf(vusFmt+"/"+vusFmt+" VUs", currentlyActiveVUs, maxVUs) + if spent > regularDuration { + return 1, 
[]string{vus, regularDuration.String()} + } + progVUs := fmt.Sprintf(vusFmt+"/"+vusFmt+" VUs", currentlyActiveVUs, maxVUs) + progDur := pb.GetFixedLengthDuration(spent, regularDuration) + "/" + regularDurationStr + return float64(spent) / float64(regularDuration), []string{progVUs, progDur} + } + vlv.progress.Modify(pb.WithProgress(progressFn)) + go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, vlv, progressFn) + + // Actually schedule the VUs and iterations, likely the most complicated + // executor among all of them... + runIteration := getIterationRunner(vlv.executionState, vlv.logger) + getVU := func() (lib.InitializedVU, error) { + initVU, err := vlv.executionState.GetPlannedVU(vlv.logger, false) + if err != nil { + vlv.logger.WithError(err).Error("Cannot get a VU from the buffer") + cancel() + } else { + activeVUs.Add(1) + atomic.AddInt64(activeVUsCount, 1) + vlv.executionState.ModCurrentlyActiveVUsCount(+1) + } + return initVU, err + } + returnVU := func(initVU lib.InitializedVU) { + vlv.executionState.ReturnVU(initVU, false) + atomic.AddInt64(activeVUsCount, -1) + activeVUs.Done() + vlv.executionState.ModCurrentlyActiveVUsCount(-1) + } + + vuHandles := make([]*vuHandle, maxVUs) + for i := uint64(0); i < maxVUs; i++ { + vuHandle := newStoppedVUHandle( + maxDurationCtx, getVU, returnVU, &vlv.config.BaseConfig, + vlv.logger.WithField("vuNum", i)) + go vuHandle.runLoopsIfPossible(runIteration) + vuHandles[i] = vuHandle + } + + // 0 <= currentScheduledVUs <= currentMaxAllowedVUs <= maxVUs + var currentScheduledVUs, currentMaxAllowedVUs uint64 + + handleNewScheduledVUs := func(newScheduledVUs uint64) { + if newScheduledVUs > currentScheduledVUs { + for vuNum := currentScheduledVUs; vuNum < newScheduledVUs; vuNum++ { + _ = vuHandles[vuNum].start() // TODO handle error + } + } else { + for vuNum := newScheduledVUs; vuNum < currentScheduledVUs; vuNum++ { + vuHandles[vuNum].gracefulStop() + } + } + currentScheduledVUs = newScheduledVUs + } + + 
handleNewMaxAllowedVUs := func(newMaxAllowedVUs uint64) { + if newMaxAllowedVUs < currentMaxAllowedVUs { + for vuNum := newMaxAllowedVUs; vuNum < currentMaxAllowedVUs; vuNum++ { + vuHandles[vuNum].hardStop() + } + } + currentMaxAllowedVUs = newMaxAllowedVUs + } + + wait := waiter(parentCtx, startTime) + // iterate over rawExecutionSteps and gracefulExecutionSteps in order by TimeOffset + // giving rawExecutionSteps precedence. + // we stop iterating once rawExecutionSteps are over as we need to run the remaining + // gracefulExecutionSteps concurrently while waiting for VUs to stop in order to not wait until + // the end of gracefulStop timeouts + i, j := 0, 0 + for i != len(rawExecutionSteps) { + if rawExecutionSteps[i].TimeOffset > gracefulExecutionSteps[j].TimeOffset { + if wait(gracefulExecutionSteps[j].TimeOffset) { + return + } + handleNewMaxAllowedVUs(gracefulExecutionSteps[j].PlannedVUs) + j++ + } else { + if wait(rawExecutionSteps[i].TimeOffset) { + return + } + handleNewScheduledVUs(rawExecutionSteps[i].PlannedVUs) + i++ + } + } + + go func() { // iterate over the remaining gracefulExecutionSteps + for _, step := range gracefulExecutionSteps[j:] { + if wait(step.TimeOffset) { + return + } + handleNewMaxAllowedVUs(step.PlannedVUs) + } + }() + + return nil +} + +// waiter returns a function that will sleep/wait for the required time since the startTime and then +// return. If the context was done before that it will return true otherwise it will return false +// TODO use elsewhere +// TODO set startTime here? 
+// TODO move it to a struct type or something and benchmark if that makes a difference +func waiter(ctx context.Context, startTime time.Time) func(offset time.Duration) bool { + timer := time.NewTimer(time.Hour * 24) + return func(offset time.Duration) bool { + offsetDiff := offset - time.Since(startTime) + if offsetDiff > 0 { // wait until time of event arrives // TODO have a minimum + timer.Reset(offsetDiff) + select { + case <-ctx.Done(): + return true // exit if context is cancelled + case <-timer.C: + // now we do a step + } + } + return false + } +} diff --git a/lib/executor/ramping_vus_test.go b/lib/executor/ramping_vus_test.go new file mode 100644 index 00000000000..bdd2d710f4d --- /dev/null +++ b/lib/executor/ramping_vus_test.go @@ -0,0 +1,1284 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "math/rand" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/types" +) + +func TestRampingVUsRun(t *testing.T) { + t.Parallel() + + config := RampingVUsConfig{ + BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(0)}, + GracefulRampDown: types.NullDurationFrom(0), + StartVUs: null.IntFrom(5), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(5), + }, + { + Duration: types.NullDurationFrom(0), + Target: null.IntFrom(3), + }, + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(3), + }, + }, + } + + var iterCount int64 + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, _ := setupExecutor( + t, config, es, + simpleRunner(func(ctx context.Context) error { + // Sleeping for a weird duration somewhat offset from the + // executor ticks to hopefully keep race conditions out of + // our control from failing the test. 
+ time.Sleep(300 * time.Millisecond) + atomic.AddInt64(&iterCount, 1) + return nil + }), + ) + defer cancel() + + sampleTimes := []time.Duration{ + 500 * time.Millisecond, + 1000 * time.Millisecond, + 900 * time.Millisecond, + } + + errCh := make(chan error) + go func() { errCh <- executor.Run(ctx, nil) }() + + result := make([]int64, len(sampleTimes)) + for i, d := range sampleTimes { + time.Sleep(d) + result[i] = es.GetCurrentlyActiveVUsCount() + } + + require.NoError(t, <-errCh) + + assert.Equal(t, []int64{5, 3, 0}, result) + assert.Equal(t, int64(29), atomic.LoadInt64(&iterCount)) +} + +func TestRampingVUsGracefulStopWaits(t *testing.T) { + t.Parallel() + + config := RampingVUsConfig{ + BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(time.Second)}, + StartVUs: null.IntFrom(1), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(1), + }, + }, + } + + var ( + started = make(chan struct{}) // the iteration started + stopped = make(chan struct{}) // the iteration stopped + stop = make(chan struct{}) // the itearation should stop + ) + + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, _ := setupExecutor( + t, config, es, + simpleRunner(func(ctx context.Context) error { + close(started) + defer close(stopped) + select { + case <-ctx.Done(): + t.Fatal("The iterations should've ended before the context") + case <-stop: + } + return nil + }), + ) + defer cancel() + errCh := make(chan error) + go func() { errCh <- executor.Run(ctx, nil) }() + + <-started + // 500 milliseconds more then the duration and 500 less then the gracefulStop + time.Sleep(time.Millisecond * 1500) + close(stop) + <-stopped + + require.NoError(t, <-errCh) +} + +func TestRampingVUsGracefulStopStops(t *testing.T) { + t.Parallel() + + config := RampingVUsConfig{ + BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(time.Second)}, + 
StartVUs: null.IntFrom(1), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(1), + }, + }, + } + + var ( + started = make(chan struct{}) // the iteration started + stopped = make(chan struct{}) // the iteration stopped + stop = make(chan struct{}) // the itearation should stop + ) + + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, _ := setupExecutor( + t, config, es, + simpleRunner(func(ctx context.Context) error { + close(started) + defer close(stopped) + select { + case <-ctx.Done(): + case <-stop: + t.Fatal("The iterations shouldn't have ended before the context") + } + return nil + }), + ) + defer cancel() + errCh := make(chan error) + go func() { errCh <- executor.Run(ctx, nil) }() + + <-started + // 500 milliseconds more then the gracefulStop + duration + time.Sleep(time.Millisecond * 2500) + close(stop) + <-stopped + + require.NoError(t, <-errCh) +} + +func TestRampingVUsGracefulRampDown(t *testing.T) { + t.Parallel() + + config := RampingVUsConfig{ + BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(5 * time.Second)}, + StartVUs: null.IntFrom(2), + GracefulRampDown: types.NullDurationFrom(5 * time.Second), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(2), + }, + { + Duration: types.NullDurationFrom(1 * time.Second), + Target: null.IntFrom(0), + }, + }, + } + + var ( + started = make(chan struct{}) // the iteration started + stopped = make(chan struct{}) // the iteration stopped + stop = make(chan struct{}) // the itearation should stop + ) + + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, _ := setupExecutor( + t, config, es, + simpleRunner(func(ctx context.Context) error { + if lib.GetState(ctx).Vu == 1 { // the first VU will wait here to do 
stuff + close(started) + defer close(stopped) + select { + case <-ctx.Done(): + t.Fatal("The iterations shouldn't have ended before the context") + case <-stop: + } + } else { // all other (1) VUs will just sleep long enough + time.Sleep(2500 * time.Millisecond) + } + return nil + }), + ) + defer cancel() + errCh := make(chan error) + go func() { errCh <- executor.Run(ctx, nil) }() + + <-started + // 500 milliseconds more then the gracefulRampDown + duration + time.Sleep(2500 * time.Millisecond) + close(stop) + <-stopped + + select { + case err := <-errCh: + require.NoError(t, err) + case <-time.After(time.Second): // way too much time + t.Fatal("Execution should've ended already") + } +} + +// Ensure there's no wobble of VUs during graceful ramp-down, without segments. +// See https://github.com/loadimpact/k6/issues/1296 +func TestRampingVUsRampDownNoWobble(t *testing.T) { + t.Parallel() + + config := RampingVUsConfig{ + BaseConfig: BaseConfig{GracefulStop: types.NullDurationFrom(0)}, + GracefulRampDown: types.NullDurationFrom(1 * time.Second), + StartVUs: null.IntFrom(0), + Stages: []Stage{ + { + Duration: types.NullDurationFrom(3 * time.Second), + Target: null.IntFrom(10), + }, + { + Duration: types.NullDurationFrom(2 * time.Second), + Target: null.IntFrom(0), + }, + }, + } + + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, _ := setupExecutor( + t, config, es, + simpleRunner(func(ctx context.Context) error { + time.Sleep(500 * time.Millisecond) + return nil + }), + ) + defer cancel() + + sampleTimes := []time.Duration{ + 100 * time.Millisecond, + 3000 * time.Millisecond, + } + const rampDownSampleTime = 50 * time.Millisecond + var rampDownSamples = int(time.Duration( + config.Stages[len(config.Stages)-1].Duration.Duration+config.GracefulRampDown.Duration, + ) / rampDownSampleTime) + + errCh := make(chan error) + go func() { errCh <- executor.Run(ctx, nil) }() 
+ + result := make([]int64, len(sampleTimes)+rampDownSamples) + for i, d := range sampleTimes { + time.Sleep(d) + result[i] = es.GetCurrentlyActiveVUsCount() + } + + // Sample ramp-down at a higher rate + for i := len(sampleTimes); i < rampDownSamples; i++ { + time.Sleep(rampDownSampleTime) + result[i] = es.GetCurrentlyActiveVUsCount() + } + + require.NoError(t, <-errCh) + + // Some baseline checks + assert.Equal(t, int64(0), result[0]) + assert.Equal(t, int64(10), result[1]) + assert.Equal(t, int64(0), result[len(result)-1]) + + vuChanges := []int64{result[2]} + // Check ramp-down consistency + for i := 3; i < len(result[2:]); i++ { + if result[i] != result[i-1] { + vuChanges = append(vuChanges, result[i]) + } + } + assert.Equal(t, []int64{10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, vuChanges) +} + +func TestRampingVUsConfigExecutionPlanExample(t *testing.T) { + t.Parallel() + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + conf := NewRampingVUsConfig("test") + conf.StartVUs = null.IntFrom(4) + conf.Stages = []Stage{ + {Target: null.IntFrom(6), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(1), Duration: types.NullDurationFrom(5 * time.Second)}, + {Target: null.IntFrom(5), Duration: types.NullDurationFrom(4 * time.Second)}, + {Target: null.IntFrom(1), Duration: types.NullDurationFrom(4 * time.Second)}, + {Target: null.IntFrom(4), Duration: types.NullDurationFrom(3 * time.Second)}, + {Target: null.IntFrom(4), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(1), Duration: types.NullDurationFrom(0 * time.Second)}, + {Target: null.IntFrom(1), Duration: types.NullDurationFrom(3 * time.Second)}, + } + + expRawStepsNoZeroEnd := []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 4}, + {TimeOffset: 1 * time.Second, PlannedVUs: 5}, + {TimeOffset: 2 * time.Second, PlannedVUs: 6}, + {TimeOffset: 3 * time.Second, PlannedVUs: 5}, + {TimeOffset: 4 * time.Second, PlannedVUs: 4}, + {TimeOffset: 5 * 
time.Second, PlannedVUs: 3}, + {TimeOffset: 6 * time.Second, PlannedVUs: 2}, + {TimeOffset: 7 * time.Second, PlannedVUs: 1}, + {TimeOffset: 8 * time.Second, PlannedVUs: 2}, + {TimeOffset: 9 * time.Second, PlannedVUs: 3}, + {TimeOffset: 10 * time.Second, PlannedVUs: 4}, + {TimeOffset: 11 * time.Second, PlannedVUs: 5}, + {TimeOffset: 12 * time.Second, PlannedVUs: 4}, + {TimeOffset: 13 * time.Second, PlannedVUs: 3}, + {TimeOffset: 14 * time.Second, PlannedVUs: 2}, + {TimeOffset: 15 * time.Second, PlannedVUs: 1}, + {TimeOffset: 16 * time.Second, PlannedVUs: 2}, + {TimeOffset: 17 * time.Second, PlannedVUs: 3}, + {TimeOffset: 18 * time.Second, PlannedVUs: 4}, + {TimeOffset: 20 * time.Second, PlannedVUs: 1}, + } + rawStepsNoZeroEnd := conf.getRawExecutionSteps(et, false) + assert.Equal(t, expRawStepsNoZeroEnd, rawStepsNoZeroEnd) + endOffset, isFinal := lib.GetEndOffset(rawStepsNoZeroEnd) + assert.Equal(t, 20*time.Second, endOffset) + assert.Equal(t, false, isFinal) + + rawStepsZeroEnd := conf.getRawExecutionSteps(et, true) + assert.Equal(t, + append(expRawStepsNoZeroEnd, lib.ExecutionStep{TimeOffset: 23 * time.Second, PlannedVUs: 0}), + rawStepsZeroEnd, + ) + endOffset, isFinal = lib.GetEndOffset(rawStepsZeroEnd) + assert.Equal(t, 23*time.Second, endOffset) + assert.Equal(t, true, isFinal) + + // GracefulStop and GracefulRampDown equal to the default 30 sec + assert.Equal(t, []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 4}, + {TimeOffset: 1 * time.Second, PlannedVUs: 5}, + {TimeOffset: 2 * time.Second, PlannedVUs: 6}, + {TimeOffset: 33 * time.Second, PlannedVUs: 5}, + {TimeOffset: 42 * time.Second, PlannedVUs: 4}, + {TimeOffset: 50 * time.Second, PlannedVUs: 1}, + {TimeOffset: 53 * time.Second, PlannedVUs: 0}, + }, conf.GetExecutionRequirements(et)) + + // Try a longer GracefulStop than the GracefulRampDown + conf.GracefulStop = types.NullDurationFrom(80 * time.Second) + assert.Equal(t, []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 
4}, + {TimeOffset: 1 * time.Second, PlannedVUs: 5}, + {TimeOffset: 2 * time.Second, PlannedVUs: 6}, + {TimeOffset: 33 * time.Second, PlannedVUs: 5}, + {TimeOffset: 42 * time.Second, PlannedVUs: 4}, + {TimeOffset: 50 * time.Second, PlannedVUs: 1}, + {TimeOffset: 103 * time.Second, PlannedVUs: 0}, + }, conf.GetExecutionRequirements(et)) + + // Try a much shorter GracefulStop than the GracefulRampDown + conf.GracefulStop = types.NullDurationFrom(3 * time.Second) + assert.Equal(t, []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 4}, + {TimeOffset: 1 * time.Second, PlannedVUs: 5}, + {TimeOffset: 2 * time.Second, PlannedVUs: 6}, + {TimeOffset: 26 * time.Second, PlannedVUs: 0}, + }, conf.GetExecutionRequirements(et)) + + // Try a zero GracefulStop + conf.GracefulStop = types.NullDurationFrom(0 * time.Second) + assert.Equal(t, []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 4}, + {TimeOffset: 1 * time.Second, PlannedVUs: 5}, + {TimeOffset: 2 * time.Second, PlannedVUs: 6}, + {TimeOffset: 23 * time.Second, PlannedVUs: 0}, + }, conf.GetExecutionRequirements(et)) + + // Try a zero GracefulStop and GracefulRampDown, i.e. 
raw steps with 0 end cap + conf.GracefulRampDown = types.NullDurationFrom(0 * time.Second) + assert.Equal(t, rawStepsZeroEnd, conf.GetExecutionRequirements(et)) +} + +func TestRampingVUsConfigExecutionPlanExampleOneThird(t *testing.T) { + t.Parallel() + et, err := lib.NewExecutionTuple(newExecutionSegmentFromString("0:1/3"), nil) + require.NoError(t, err) + conf := NewRampingVUsConfig("test") + conf.StartVUs = null.IntFrom(4) + conf.Stages = []Stage{ + {Target: null.IntFrom(6), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(1), Duration: types.NullDurationFrom(5 * time.Second)}, + {Target: null.IntFrom(5), Duration: types.NullDurationFrom(4 * time.Second)}, + {Target: null.IntFrom(1), Duration: types.NullDurationFrom(4 * time.Second)}, + {Target: null.IntFrom(4), Duration: types.NullDurationFrom(3 * time.Second)}, + {Target: null.IntFrom(4), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(1), Duration: types.NullDurationFrom(0 * time.Second)}, + {Target: null.IntFrom(1), Duration: types.NullDurationFrom(3 * time.Second)}, + } + + expRawStepsNoZeroEnd := []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 1}, + {TimeOffset: 1 * time.Second, PlannedVUs: 2}, + {TimeOffset: 4 * time.Second, PlannedVUs: 1}, + {TimeOffset: 7 * time.Second, PlannedVUs: 0}, + {TimeOffset: 8 * time.Second, PlannedVUs: 1}, + {TimeOffset: 11 * time.Second, PlannedVUs: 2}, + {TimeOffset: 12 * time.Second, PlannedVUs: 1}, + {TimeOffset: 15 * time.Second, PlannedVUs: 0}, + {TimeOffset: 16 * time.Second, PlannedVUs: 1}, + {TimeOffset: 20 * time.Second, PlannedVUs: 0}, + } + rawStepsNoZeroEnd := conf.getRawExecutionSteps(et, false) + assert.Equal(t, expRawStepsNoZeroEnd, rawStepsNoZeroEnd) + endOffset, isFinal := lib.GetEndOffset(rawStepsNoZeroEnd) + assert.Equal(t, 20*time.Second, endOffset) + assert.Equal(t, true, isFinal) + + rawStepsZeroEnd := conf.getRawExecutionSteps(et, true) + assert.Equal(t, expRawStepsNoZeroEnd, 
rawStepsZeroEnd) + endOffset, isFinal = lib.GetEndOffset(rawStepsZeroEnd) + assert.Equal(t, 20*time.Second, endOffset) + assert.Equal(t, true, isFinal) + + // GracefulStop and GracefulRampDown equal to the default 30 sec + assert.Equal(t, []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 1}, + {TimeOffset: 1 * time.Second, PlannedVUs: 2}, + {TimeOffset: 42 * time.Second, PlannedVUs: 1}, + {TimeOffset: 50 * time.Second, PlannedVUs: 0}, + }, conf.GetExecutionRequirements(et)) + + // Try a longer GracefulStop than the GracefulRampDown + conf.GracefulStop = types.NullDurationFrom(80 * time.Second) + assert.Equal(t, []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 1}, + {TimeOffset: 1 * time.Second, PlannedVUs: 2}, + {TimeOffset: 42 * time.Second, PlannedVUs: 1}, + {TimeOffset: 50 * time.Second, PlannedVUs: 0}, + }, conf.GetExecutionRequirements(et)) + + // Try a much shorter GracefulStop than the GracefulRampDown + conf.GracefulStop = types.NullDurationFrom(3 * time.Second) + assert.Equal(t, []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 1}, + {TimeOffset: 1 * time.Second, PlannedVUs: 2}, + {TimeOffset: 26 * time.Second, PlannedVUs: 0}, + }, conf.GetExecutionRequirements(et)) + + // Try a zero GracefulStop + conf.GracefulStop = types.NullDurationFrom(0 * time.Second) + assert.Equal(t, []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 1}, + {TimeOffset: 1 * time.Second, PlannedVUs: 2}, + {TimeOffset: 23 * time.Second, PlannedVUs: 0}, + }, conf.GetExecutionRequirements(et)) + + // Try a zero GracefulStop and GracefulRampDown, i.e. 
raw steps with 0 end cap + conf.GracefulRampDown = types.NullDurationFrom(0 * time.Second) + assert.Equal(t, rawStepsZeroEnd, conf.GetExecutionRequirements(et)) +} + +func TestRampingVUsExecutionTupleTests(t *testing.T) { + t.Parallel() + + conf := NewRampingVUsConfig("test") + conf.StartVUs = null.IntFrom(4) + conf.Stages = []Stage{ + {Target: null.IntFrom(6), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(1), Duration: types.NullDurationFrom(5 * time.Second)}, + {Target: null.IntFrom(5), Duration: types.NullDurationFrom(4 * time.Second)}, + {Target: null.IntFrom(1), Duration: types.NullDurationFrom(4 * time.Second)}, + {Target: null.IntFrom(4), Duration: types.NullDurationFrom(3 * time.Second)}, + {Target: null.IntFrom(4), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(1), Duration: types.NullDurationFrom(0 * time.Second)}, + {Target: null.IntFrom(1), Duration: types.NullDurationFrom(3 * time.Second)}, + {Target: null.IntFrom(5), Duration: types.NullDurationFrom(0 * time.Second)}, + {Target: null.IntFrom(5), Duration: types.NullDurationFrom(3 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(0 * time.Second)}, + {Target: null.IntFrom(2), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(4), Duration: types.NullDurationFrom(4 * time.Second)}, + } + /* + + Graph of the above: + ^ + 8 | + 7 | + 6 | + + 5 |/ \ + +--+ + 4 + \ / \ +-+ | | * + 3 | \ / \ / | | | / + 2 | \ / \ / | | | + / + 1 | + + +--+ |/ \ / + 0 +-------------------------+---+------------------------------> + 01234567890123456789012345678901234567890 + + */ + + testCases := []struct { + expectedSteps []lib.ExecutionStep + et *lib.ExecutionTuple + }{ + { + et: mustNewExecutionTuple(nil, nil), + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 4}, + {TimeOffset: 1 * time.Second, 
PlannedVUs: 5}, + {TimeOffset: 2 * time.Second, PlannedVUs: 6}, + {TimeOffset: 3 * time.Second, PlannedVUs: 5}, + {TimeOffset: 4 * time.Second, PlannedVUs: 4}, + {TimeOffset: 5 * time.Second, PlannedVUs: 3}, + {TimeOffset: 6 * time.Second, PlannedVUs: 2}, + {TimeOffset: 7 * time.Second, PlannedVUs: 1}, + {TimeOffset: 8 * time.Second, PlannedVUs: 2}, + {TimeOffset: 9 * time.Second, PlannedVUs: 3}, + {TimeOffset: 10 * time.Second, PlannedVUs: 4}, + {TimeOffset: 11 * time.Second, PlannedVUs: 5}, + {TimeOffset: 12 * time.Second, PlannedVUs: 4}, + {TimeOffset: 13 * time.Second, PlannedVUs: 3}, + {TimeOffset: 14 * time.Second, PlannedVUs: 2}, + {TimeOffset: 15 * time.Second, PlannedVUs: 1}, + {TimeOffset: 16 * time.Second, PlannedVUs: 2}, + {TimeOffset: 17 * time.Second, PlannedVUs: 3}, + {TimeOffset: 18 * time.Second, PlannedVUs: 4}, + {TimeOffset: 20 * time.Second, PlannedVUs: 1}, + {TimeOffset: 23 * time.Second, PlannedVUs: 5}, + {TimeOffset: 26 * time.Second, PlannedVUs: 0}, + {TimeOffset: 27 * time.Second, PlannedVUs: 1}, + {TimeOffset: 28 * time.Second, PlannedVUs: 2}, + {TimeOffset: 29 * time.Second, PlannedVUs: 1}, + {TimeOffset: 30 * time.Second, PlannedVUs: 0}, + {TimeOffset: 31 * time.Second, PlannedVUs: 1}, + {TimeOffset: 32 * time.Second, PlannedVUs: 2}, + {TimeOffset: 33 * time.Second, PlannedVUs: 3}, + {TimeOffset: 34 * time.Second, PlannedVUs: 4}, + }, + }, + { + et: mustNewExecutionTuple(newExecutionSegmentFromString("0:1/3"), nil), + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 1}, + {TimeOffset: 1 * time.Second, PlannedVUs: 2}, + {TimeOffset: 4 * time.Second, PlannedVUs: 1}, + {TimeOffset: 7 * time.Second, PlannedVUs: 0}, + {TimeOffset: 8 * time.Second, PlannedVUs: 1}, + {TimeOffset: 11 * time.Second, PlannedVUs: 2}, + {TimeOffset: 12 * time.Second, PlannedVUs: 1}, + {TimeOffset: 15 * time.Second, PlannedVUs: 0}, + {TimeOffset: 16 * time.Second, PlannedVUs: 1}, + {TimeOffset: 20 * time.Second, PlannedVUs: 0}, + 
{TimeOffset: 23 * time.Second, PlannedVUs: 2}, + {TimeOffset: 26 * time.Second, PlannedVUs: 0}, + {TimeOffset: 28 * time.Second, PlannedVUs: 1}, + {TimeOffset: 29 * time.Second, PlannedVUs: 0}, + {TimeOffset: 32 * time.Second, PlannedVUs: 1}, + }, + }, + { + et: mustNewExecutionTuple(newExecutionSegmentFromString("0:1/3"), newExecutionSegmentSequenceFromString("0,1/3,1")), + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 1}, + {TimeOffset: 1 * time.Second, PlannedVUs: 2}, + {TimeOffset: 4 * time.Second, PlannedVUs: 1}, + {TimeOffset: 7 * time.Second, PlannedVUs: 0}, + {TimeOffset: 8 * time.Second, PlannedVUs: 1}, + {TimeOffset: 11 * time.Second, PlannedVUs: 2}, + {TimeOffset: 12 * time.Second, PlannedVUs: 1}, + {TimeOffset: 15 * time.Second, PlannedVUs: 0}, + {TimeOffset: 16 * time.Second, PlannedVUs: 1}, + {TimeOffset: 20 * time.Second, PlannedVUs: 0}, + {TimeOffset: 23 * time.Second, PlannedVUs: 2}, + {TimeOffset: 26 * time.Second, PlannedVUs: 0}, + {TimeOffset: 28 * time.Second, PlannedVUs: 1}, + {TimeOffset: 29 * time.Second, PlannedVUs: 0}, + {TimeOffset: 32 * time.Second, PlannedVUs: 1}, + }, + }, + { + et: mustNewExecutionTuple(newExecutionSegmentFromString("1/3:2/3"), nil), + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 1}, + {TimeOffset: 1 * time.Second, PlannedVUs: 2}, + {TimeOffset: 4 * time.Second, PlannedVUs: 1}, + {TimeOffset: 7 * time.Second, PlannedVUs: 0}, + {TimeOffset: 8 * time.Second, PlannedVUs: 1}, + {TimeOffset: 11 * time.Second, PlannedVUs: 2}, + {TimeOffset: 12 * time.Second, PlannedVUs: 1}, + {TimeOffset: 15 * time.Second, PlannedVUs: 0}, + {TimeOffset: 16 * time.Second, PlannedVUs: 1}, + {TimeOffset: 20 * time.Second, PlannedVUs: 0}, + {TimeOffset: 23 * time.Second, PlannedVUs: 2}, + {TimeOffset: 26 * time.Second, PlannedVUs: 0}, + {TimeOffset: 28 * time.Second, PlannedVUs: 1}, + {TimeOffset: 29 * time.Second, PlannedVUs: 0}, + {TimeOffset: 32 * time.Second, PlannedVUs: 
1}, + }, + }, + { + et: mustNewExecutionTuple(newExecutionSegmentFromString("2/3:1"), nil), + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 1}, + {TimeOffset: 1 * time.Second, PlannedVUs: 2}, + {TimeOffset: 4 * time.Second, PlannedVUs: 1}, + {TimeOffset: 7 * time.Second, PlannedVUs: 0}, + {TimeOffset: 8 * time.Second, PlannedVUs: 1}, + {TimeOffset: 11 * time.Second, PlannedVUs: 2}, + {TimeOffset: 12 * time.Second, PlannedVUs: 1}, + {TimeOffset: 15 * time.Second, PlannedVUs: 0}, + {TimeOffset: 16 * time.Second, PlannedVUs: 1}, + {TimeOffset: 20 * time.Second, PlannedVUs: 0}, + {TimeOffset: 23 * time.Second, PlannedVUs: 2}, + {TimeOffset: 26 * time.Second, PlannedVUs: 0}, + {TimeOffset: 28 * time.Second, PlannedVUs: 1}, + {TimeOffset: 29 * time.Second, PlannedVUs: 0}, + {TimeOffset: 32 * time.Second, PlannedVUs: 1}, + }, + }, + { + et: mustNewExecutionTuple(newExecutionSegmentFromString("0:1/3"), newExecutionSegmentSequenceFromString("0,1/3,2/3,1")), + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 2}, + {TimeOffset: 5 * time.Second, PlannedVUs: 1}, + {TimeOffset: 10 * time.Second, PlannedVUs: 2}, + {TimeOffset: 13 * time.Second, PlannedVUs: 1}, + {TimeOffset: 18 * time.Second, PlannedVUs: 2}, + {TimeOffset: 20 * time.Second, PlannedVUs: 1}, + {TimeOffset: 23 * time.Second, PlannedVUs: 2}, + {TimeOffset: 26 * time.Second, PlannedVUs: 0}, + {TimeOffset: 27 * time.Second, PlannedVUs: 1}, + {TimeOffset: 30 * time.Second, PlannedVUs: 0}, + {TimeOffset: 31 * time.Second, PlannedVUs: 1}, + {TimeOffset: 34 * time.Second, PlannedVUs: 2}, + }, + }, + { + et: mustNewExecutionTuple(newExecutionSegmentFromString("1/3:2/3"), newExecutionSegmentSequenceFromString("0,1/3,2/3,1")), + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 1}, + {TimeOffset: 1 * time.Second, PlannedVUs: 2}, + {TimeOffset: 4 * time.Second, PlannedVUs: 1}, + {TimeOffset: 7 * time.Second, PlannedVUs: 0}, + 
{TimeOffset: 8 * time.Second, PlannedVUs: 1}, + {TimeOffset: 11 * time.Second, PlannedVUs: 2}, + {TimeOffset: 12 * time.Second, PlannedVUs: 1}, + {TimeOffset: 15 * time.Second, PlannedVUs: 0}, + {TimeOffset: 16 * time.Second, PlannedVUs: 1}, + {TimeOffset: 20 * time.Second, PlannedVUs: 0}, + {TimeOffset: 23 * time.Second, PlannedVUs: 2}, + {TimeOffset: 26 * time.Second, PlannedVUs: 0}, + {TimeOffset: 28 * time.Second, PlannedVUs: 1}, + {TimeOffset: 29 * time.Second, PlannedVUs: 0}, + {TimeOffset: 32 * time.Second, PlannedVUs: 1}, + }, + }, + { + et: mustNewExecutionTuple(newExecutionSegmentFromString("2/3:1"), newExecutionSegmentSequenceFromString("0,1/3,2/3,1")), + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 1}, + {TimeOffset: 2 * time.Second, PlannedVUs: 2}, + {TimeOffset: 3 * time.Second, PlannedVUs: 1}, + {TimeOffset: 6 * time.Second, PlannedVUs: 0}, + {TimeOffset: 9 * time.Second, PlannedVUs: 1}, + {TimeOffset: 14 * time.Second, PlannedVUs: 0}, + {TimeOffset: 17 * time.Second, PlannedVUs: 1}, + {TimeOffset: 20 * time.Second, PlannedVUs: 0}, + {TimeOffset: 23 * time.Second, PlannedVUs: 1}, + {TimeOffset: 26 * time.Second, PlannedVUs: 0}, + {TimeOffset: 33 * time.Second, PlannedVUs: 1}, + }, + }, + } + + for _, testCase := range testCases { + et := testCase.et + expectedSteps := testCase.expectedSteps + + t.Run(et.String(), func(t *testing.T) { + rawStepsNoZeroEnd := conf.getRawExecutionSteps(et, false) + assert.Equal(t, expectedSteps, rawStepsNoZeroEnd) + }) + } +} + +func TestRampingVUsGetRawExecutionStepsCornerCases(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + expectedSteps []lib.ExecutionStep + et *lib.ExecutionTuple + stages []Stage + start int64 + }{ + { + name: "going up then down straight away", + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 2}, + {TimeOffset: 0 * time.Second, PlannedVUs: 5}, + {TimeOffset: 1 * time.Second, PlannedVUs: 4}, + {TimeOffset: 
2 * time.Second, PlannedVUs: 3}, + }, + stages: []Stage{ + {Target: null.IntFrom(5), Duration: types.NullDurationFrom(0 * time.Second)}, + {Target: null.IntFrom(3), Duration: types.NullDurationFrom(2 * time.Second)}, + }, + start: 2, + }, + { + name: "jump up then go up again", + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 3}, + {TimeOffset: 1 * time.Second, PlannedVUs: 4}, + {TimeOffset: 2 * time.Second, PlannedVUs: 5}, + }, + stages: []Stage{ + {Target: null.IntFrom(5), Duration: types.NullDurationFrom(2 * time.Second)}, + }, + start: 3, + }, + { + name: "up down up down", + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 0}, + {TimeOffset: 1 * time.Second, PlannedVUs: 1}, + {TimeOffset: 2 * time.Second, PlannedVUs: 2}, + {TimeOffset: 3 * time.Second, PlannedVUs: 1}, + {TimeOffset: 4 * time.Second, PlannedVUs: 0}, + {TimeOffset: 5 * time.Second, PlannedVUs: 1}, + {TimeOffset: 6 * time.Second, PlannedVUs: 2}, + {TimeOffset: 7 * time.Second, PlannedVUs: 1}, + {TimeOffset: 8 * time.Second, PlannedVUs: 0}, + }, + stages: []Stage{ + {Target: null.IntFrom(2), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(2), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(2 * time.Second)}, + }, + }, + { + name: "up down up down in half", + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 0}, + {TimeOffset: 1 * time.Second, PlannedVUs: 1}, + {TimeOffset: 4 * time.Second, PlannedVUs: 0}, + {TimeOffset: 5 * time.Second, PlannedVUs: 1}, + {TimeOffset: 8 * time.Second, PlannedVUs: 0}, + }, + et: mustNewExecutionTuple(newExecutionSegmentFromString("0:1/2"), nil), + stages: []Stage{ + {Target: null.IntFrom(2), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(2 
* time.Second)}, + {Target: null.IntFrom(2), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(2 * time.Second)}, + }, + }, + { + name: "up down up down in the other half", + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 0}, + {TimeOffset: 2 * time.Second, PlannedVUs: 1}, + {TimeOffset: 3 * time.Second, PlannedVUs: 0}, + {TimeOffset: 6 * time.Second, PlannedVUs: 1}, + {TimeOffset: 7 * time.Second, PlannedVUs: 0}, + }, + et: mustNewExecutionTuple(newExecutionSegmentFromString("1/2:1"), nil), + stages: []Stage{ + {Target: null.IntFrom(2), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(2), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(2 * time.Second)}, + }, + }, + { + name: "up down up down in with nothing", + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 0}, + }, + et: mustNewExecutionTuple(newExecutionSegmentFromString("2/3:1"), newExecutionSegmentSequenceFromString("0,1/3,2/3,1")), + stages: []Stage{ + {Target: null.IntFrom(2), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(2), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(2 * time.Second)}, + }, + }, + { + name: "up down up down in with funky sequence", // panics if there are no localIndex == 0 guards + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 0}, + {TimeOffset: 1 * time.Second, PlannedVUs: 1}, + {TimeOffset: 4 * time.Second, PlannedVUs: 0}, + {TimeOffset: 5 * time.Second, PlannedVUs: 1}, + {TimeOffset: 8 * time.Second, PlannedVUs: 0}, + }, + et: 
mustNewExecutionTuple(newExecutionSegmentFromString("0:1/3"), newExecutionSegmentSequenceFromString("0,1/3,1/2,2/3,1")), + stages: []Stage{ + {Target: null.IntFrom(2), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(2), Duration: types.NullDurationFrom(2 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(2 * time.Second)}, + }, + }, + { + name: "strange", + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 0}, + {TimeOffset: 1 * time.Second, PlannedVUs: 1}, + {TimeOffset: 5 * time.Second, PlannedVUs: 2}, + {TimeOffset: 8 * time.Second, PlannedVUs: 3}, + {TimeOffset: 11 * time.Second, PlannedVUs: 4}, + {TimeOffset: 15 * time.Second, PlannedVUs: 5}, + {TimeOffset: 18 * time.Second, PlannedVUs: 6}, + {TimeOffset: 23 * time.Second, PlannedVUs: 7}, + {TimeOffset: 35 * time.Second, PlannedVUs: 8}, + {TimeOffset: 44 * time.Second, PlannedVUs: 9}, + }, + et: mustNewExecutionTuple(newExecutionSegmentFromString("0:0.3"), newExecutionSegmentSequenceFromString("0,0.3,0.6,0.9,1")), + stages: []Stage{ + {Target: null.IntFrom(20), Duration: types.NullDurationFrom(20 * time.Second)}, + {Target: null.IntFrom(30), Duration: types.NullDurationFrom(30 * time.Second)}, + }, + }, + { + name: "more up and down", + expectedSteps: []lib.ExecutionStep{ + {TimeOffset: 0 * time.Second, PlannedVUs: 0}, + {TimeOffset: 1 * time.Second, PlannedVUs: 1}, + {TimeOffset: 2 * time.Second, PlannedVUs: 2}, + {TimeOffset: 3 * time.Second, PlannedVUs: 3}, + {TimeOffset: 4 * time.Second, PlannedVUs: 4}, + {TimeOffset: 5 * time.Second, PlannedVUs: 5}, + {TimeOffset: 6 * time.Second, PlannedVUs: 4}, + {TimeOffset: 7 * time.Second, PlannedVUs: 3}, + {TimeOffset: 8 * time.Second, PlannedVUs: 2}, + {TimeOffset: 9 * time.Second, PlannedVUs: 1}, + {TimeOffset: 10 * time.Second, PlannedVUs: 0}, + }, + stages: []Stage{ + {Target: null.IntFrom(5), 
Duration: types.NullDurationFrom(5 * time.Second)}, + {Target: null.IntFrom(0), Duration: types.NullDurationFrom(5 * time.Second)}, + }, + }, + } + + for _, testCase := range testCases { + conf := NewRampingVUsConfig("test") + conf.StartVUs = null.IntFrom(testCase.start) + conf.Stages = testCase.stages + et := testCase.et + if et == nil { + et = mustNewExecutionTuple(nil, nil) + } + expectedSteps := testCase.expectedSteps + + t.Run(testCase.name, func(t *testing.T) { + rawStepsNoZeroEnd := conf.getRawExecutionSteps(et, false) + assert.Equal(t, expectedSteps, rawStepsNoZeroEnd) + }) + } +} + +func BenchmarkRampingVUsGetRawExecutionSteps(b *testing.B) { + testCases := []struct { + seq string + seg string + }{ + {}, + {seg: "0:1"}, + {seq: "0,0.3,0.5,0.6,0.7,0.8,0.9,1", seg: "0:0.3"}, + {seq: "0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1", seg: "0:0.1"}, + {seg: "2/5:4/5"}, + {seg: "2235/5213:4/5"}, // just wanted it to be ugly ;D + } + + stageCases := []struct { + name string + stages string + }{ + { + name: "normal", + stages: `[{"duration":"5m", "target":5000},{"duration":"5m", "target":5000},{"duration":"5m", "target":10000},{"duration":"5m", "target":10000}]`, + }, { + name: "rollercoaster", + stages: `[{"duration":"5m", "target":5000},{"duration":"5m", "target":0}, + {"duration":"5m", "target":5000},{"duration":"5m", "target":0}, + {"duration":"5m", "target":5000},{"duration":"5m", "target":0}, + {"duration":"5m", "target":5000},{"duration":"5m", "target":0}, + {"duration":"5m", "target":5000},{"duration":"5m", "target":0}, + {"duration":"5m", "target":5000},{"duration":"5m", "target":0}, + {"duration":"5m", "target":5000},{"duration":"5m", "target":0}, + {"duration":"5m", "target":5000},{"duration":"5m", "target":0}, + {"duration":"5m", "target":5000},{"duration":"5m", "target":0}, + {"duration":"5m", "target":5000},{"duration":"5m", "target":0}, + {"duration":"5m", "target":5000},{"duration":"5m", "target":0}]`, + }, + } + for _, tc := range testCases { + tc := tc 
+ b.Run(fmt.Sprintf("seq:%s;segment:%s", tc.seq, tc.seg), func(b *testing.B) { + ess, err := lib.NewExecutionSegmentSequenceFromString(tc.seq) + require.NoError(b, err) + segment, err := lib.NewExecutionSegmentFromString(tc.seg) + require.NoError(b, err) + if tc.seg == "" { + segment = nil // specifically for the optimization + } + et, err := lib.NewExecutionTuple(segment, &ess) + require.NoError(b, err) + for _, stageCase := range stageCases { + var st []Stage + require.NoError(b, json.Unmarshal([]byte(stageCase.stages), &st)) + vlvc := RampingVUsConfig{ + Stages: st, + } + b.Run(stageCase.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = vlvc.getRawExecutionSteps(et, false) + } + }) + } + }) + } +} + +func TestSegmentedIndex(t *testing.T) { + // TODO ... more structure ? + t.Run("full", func(t *testing.T) { + s := segmentedIndex{start: 0, lcd: 1, offsets: []int64{1}} + + s.next() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.next() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.next() + assert.EqualValues(t, 3, s.unscaled) + assert.EqualValues(t, 3, s.scaled) + + s.prev() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.prev() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + }) + + t.Run("half", func(t *testing.T) { + s := segmentedIndex{start: 0, lcd: 2, offsets: []int64{2}} + + s.next() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.next() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.next() + 
assert.EqualValues(t, 3, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.next() + assert.EqualValues(t, 5, s.unscaled) + assert.EqualValues(t, 3, s.scaled) + + s.prev() + assert.EqualValues(t, 3, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.prev() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.next() + assert.EqualValues(t, 1, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + }) + + t.Run("the other half", func(t *testing.T) { + s := segmentedIndex{start: 1, lcd: 2, offsets: []int64{2}} + + s.next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.next() + assert.EqualValues(t, 4, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.next() + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 3, s.scaled) + + s.prev() + assert.EqualValues(t, 4, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.prev() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + }) + + t.Run("strange", func(t *testing.T) { + s := segmentedIndex{start: 1, lcd: 7, offsets: []int64{4, 3}} + + s.next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.next() + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.next() + assert.EqualValues(t, 9, s.unscaled) + assert.EqualValues(t, 3, s.scaled) + + 
s.prev() + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.prev() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + + s.next() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.goTo(6) + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.goTo(5) + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.goTo(7) + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.goTo(8) + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.goTo(9) + assert.EqualValues(t, 9, s.unscaled) + assert.EqualValues(t, 3, s.scaled) + + s.prev() + assert.EqualValues(t, 6, s.unscaled) + assert.EqualValues(t, 2, s.scaled) + + s.prev() + assert.EqualValues(t, 2, s.unscaled) + assert.EqualValues(t, 1, s.scaled) + + s.prev() + assert.EqualValues(t, 0, s.unscaled) + assert.EqualValues(t, 0, s.scaled) + }) +} + +// TODO: delete in favor of lib.generateRandomSequence() after +// https://github.com/loadimpact/k6/issues/1302 is done (can't import now due to +// import loops...) 
+func generateRandomSequence(t testing.TB, n, m int64, r *rand.Rand) lib.ExecutionSegmentSequence { + var err error + ess := lib.ExecutionSegmentSequence(make([]*lib.ExecutionSegment, n)) + numerators := make([]int64, n) + var denominator int64 + for i := int64(0); i < n; i++ { + numerators[i] = 1 + r.Int63n(m) + denominator += numerators[i] + } + from := big.NewRat(0, 1) + for i := int64(0); i < n; i++ { + to := new(big.Rat).Add(big.NewRat(numerators[i], denominator), from) + ess[i], err = lib.NewExecutionSegment(from, to) + require.NoError(t, err) + from = to + } + + return ess +} + +func TestSumRandomSegmentSequenceMatchesNoSegment(t *testing.T) { + t.Parallel() + + seed := time.Now().UnixNano() + r := rand.New(rand.NewSource(seed)) + t.Logf("Random source seeded with %d\n", seed) + + const ( + numTests = 10 + maxStages = 10 + minStageDuration = 1 * time.Second + maxStageDuration = 10 * time.Minute + maxVUs = 300 + segmentSeqMaxLen = 15 + maxNumerator = 300 + ) + getTestConfig := func(name string) RampingVUsConfig { + stagesCount := 1 + r.Int31n(maxStages) + stages := make([]Stage, stagesCount) + for s := int32(0); s < stagesCount; s++ { + dur := (minStageDuration + time.Duration(r.Int63n(int64(maxStageDuration-minStageDuration)))).Round(time.Second) + stages[s] = Stage{Duration: types.NullDurationFrom(dur), Target: null.IntFrom(r.Int63n(maxVUs))} + } + + c := NewRampingVUsConfig(name) + c.GracefulRampDown = types.NullDurationFrom(0) + c.GracefulStop = types.NullDurationFrom(0) + c.StartVUs = null.IntFrom(r.Int63n(maxVUs)) + c.Stages = stages + return c + } + + subtractChildSteps := func(t *testing.T, parent, child []lib.ExecutionStep) { + t.Logf("subtractChildSteps()") + for _, step := range child { + t.Logf(" child planned VUs for time offset %s: %d", step.TimeOffset, step.PlannedVUs) + } + sub := uint64(0) + ci := 0 + for pi, p := range parent { + // We iterate over all parent steps and match them to child steps. 
+ // Once we have a match, we remove the child step's plannedVUs from + // the parent steps until a new match, when we adjust the subtracted + // amount again. + if p.TimeOffset > child[ci].TimeOffset && ci != len(child)-1 { + t.Errorf("ERR Could not match child offset %s with any parent time offset", child[ci].TimeOffset) + } + if p.TimeOffset == child[ci].TimeOffset { + t.Logf("Setting sub to %d at t=%s", child[ci].PlannedVUs, child[ci].TimeOffset) + sub = child[ci].PlannedVUs + if ci != len(child)-1 { + ci++ + } + } + t.Logf("Subtracting %d VUs (out of %d) at t=%s", sub, p.PlannedVUs, p.TimeOffset) + parent[pi].PlannedVUs -= sub + } + } + + for i := 0; i < numTests; i++ { + name := fmt.Sprintf("random%02d", i) + t.Run(name, func(t *testing.T) { + c := getTestConfig(name) + ranSeqLen := 2 + r.Int63n(segmentSeqMaxLen-1) + t.Logf("Config: %#v, ranSeqLen: %d", c, ranSeqLen) + randomSequence := generateRandomSequence(t, ranSeqLen, maxNumerator, r) + t.Logf("Random sequence: %s", randomSequence) + fullSeg, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + fullRawSteps := c.getRawExecutionSteps(fullSeg, false) + + for _, step := range fullRawSteps { + t.Logf("original planned VUs for time offset %s: %d", step.TimeOffset, step.PlannedVUs) + } + + for s := 0; s < len(randomSequence); s++ { + et, err := lib.NewExecutionTuple(randomSequence[s], &randomSequence) + require.NoError(t, err) + segRawSteps := c.getRawExecutionSteps(et, false) + subtractChildSteps(t, fullRawSteps, segRawSteps) + } + + for _, step := range fullRawSteps { + if step.PlannedVUs != 0 { + t.Errorf("ERR Remaining planned VUs for time offset %s are not 0 but %d", step.TimeOffset, step.PlannedVUs) + } + } + }) + } +} diff --git a/lib/executor/shared_iterations.go b/lib/executor/shared_iterations.go new file mode 100644 index 00000000000..9367a25c9c3 --- /dev/null +++ b/lib/executor/shared_iterations.go @@ -0,0 +1,278 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright 
(C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package executor + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/sirupsen/logrus" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/metrics" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" + "github.com/loadimpact/k6/ui/pb" +) + +const sharedIterationsType = "shared-iterations" + +func init() { + lib.RegisterExecutorConfigType( + sharedIterationsType, + func(name string, rawJSON []byte) (lib.ExecutorConfig, error) { + config := NewSharedIterationsConfig(name) + err := lib.StrictJSONUnmarshal(rawJSON, &config) + return config, err + }, + ) +} + +// SharedIterationsConfig stores the number of VUs iterations, as well as maxDuration settings +type SharedIterationsConfig struct { + BaseConfig + VUs null.Int `json:"vus"` + Iterations null.Int `json:"iterations"` + MaxDuration types.NullDuration `json:"maxDuration"` +} + +// NewSharedIterationsConfig returns a SharedIterationsConfig with default values +func NewSharedIterationsConfig(name string) SharedIterationsConfig { + return SharedIterationsConfig{ + BaseConfig: NewBaseConfig(name, sharedIterationsType), + VUs: null.NewInt(1, false), + Iterations: null.NewInt(1, false), + MaxDuration: types.NewNullDuration(10*time.Minute, false), // TODO: 
shorten? + } +} + +// Make sure we implement the lib.ExecutorConfig interface +var _ lib.ExecutorConfig = &SharedIterationsConfig{} + +// GetVUs returns the scaled VUs for the executor. +func (sic SharedIterationsConfig) GetVUs(et *lib.ExecutionTuple) int64 { + return et.ScaleInt64(sic.VUs.Int64) +} + +// GetIterations returns the scaled iteration count for the executor. +func (sic SharedIterationsConfig) GetIterations(et *lib.ExecutionTuple) int64 { + // TODO: Optimize this by probably changing the whole Config API + newTuple, err := et.GetNewExecutionTupleFromValue(sic.VUs.Int64) + if err != nil { + return 0 + } + return newTuple.ScaleInt64(sic.Iterations.Int64) +} + +// GetDescription returns a human-readable description of the executor options +func (sic SharedIterationsConfig) GetDescription(et *lib.ExecutionTuple) string { + return fmt.Sprintf("%d iterations shared among %d VUs%s", + sic.GetIterations(et), sic.GetVUs(et), + sic.getBaseInfo(fmt.Sprintf("maxDuration: %s", sic.MaxDuration.Duration))) +} + +// Validate makes sure all options are configured and valid +func (sic SharedIterationsConfig) Validate() []error { + errors := sic.BaseConfig.Validate() + if sic.VUs.Int64 <= 0 { + errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) + } + + if sic.Iterations.Int64 < sic.VUs.Int64 { + errors = append(errors, fmt.Errorf( + "the number of iterations (%d) shouldn't be less than the number of VUs (%d)", + sic.Iterations.Int64, sic.VUs.Int64, + )) + } + + if time.Duration(sic.MaxDuration.Duration) < minDuration { + errors = append(errors, fmt.Errorf( + "the maxDuration should be at least %s, but is %s", minDuration, sic.MaxDuration, + )) + } + + return errors +} + +// GetExecutionRequirements returns the number of required VUs to run the +// executor for its whole duration (disregarding any startTime), including the +// maximum waiting time for any iterations to gracefully stop. 
This is used by +// the execution scheduler in its VU reservation calculations, so it knows how +// many VUs to pre-initialize. +func (sic SharedIterationsConfig) GetExecutionRequirements(et *lib.ExecutionTuple) []lib.ExecutionStep { + vus := sic.GetVUs(et) + if vus == 0 { + return []lib.ExecutionStep{ + { + TimeOffset: 0, + PlannedVUs: 0, + }, + } + } + + return []lib.ExecutionStep{ + { + TimeOffset: 0, + PlannedVUs: uint64(vus), + }, + { + TimeOffset: time.Duration(sic.MaxDuration.Duration + sic.GracefulStop.Duration), + PlannedVUs: 0, + }, + } +} + +// NewExecutor creates a new SharedIterations executor +func (sic SharedIterationsConfig) NewExecutor( + es *lib.ExecutionState, logger *logrus.Entry, +) (lib.Executor, error) { + return &SharedIterations{ + BaseExecutor: NewBaseExecutor(sic, es, logger), + config: sic, + }, nil +} + +// SharedIterations executes a specific total number of iterations, which are +// all shared by the configured VUs. +type SharedIterations struct { + *BaseExecutor + config SharedIterationsConfig + et *lib.ExecutionTuple +} + +// Make sure we implement the lib.Executor interface. +var _ lib.Executor = &SharedIterations{} + +// HasWork reports whether there is any work to be done for the given execution segment. +func (sic SharedIterationsConfig) HasWork(et *lib.ExecutionTuple) bool { + return sic.GetVUs(et) > 0 && sic.GetIterations(et) > 0 +} + +// Init values needed for the execution +func (si *SharedIterations) Init(ctx context.Context) error { + // err should always be nil, because Init() won't be called for executors + // with no work, as determined by their config's HasWork() method. + et, err := si.BaseExecutor.executionState.ExecutionTuple.GetNewExecutionTupleFromValue(si.config.VUs.Int64) + si.et = et + return err +} + +// Run executes a specific total number of iterations, which are all shared by +// the configured VUs. 
+// nolint:funlen +func (si SharedIterations) Run(parentCtx context.Context, out chan<- stats.SampleContainer) (err error) { + numVUs := si.config.GetVUs(si.executionState.ExecutionTuple) + iterations := si.et.ScaleInt64(si.config.Iterations.Int64) + duration := time.Duration(si.config.MaxDuration.Duration) + gracefulStop := si.config.GetGracefulStop() + + startTime, maxDurationCtx, regDurationCtx, cancel := getDurationContexts(parentCtx, duration, gracefulStop) + defer cancel() + + // Make sure the log and the progress bar have accurate information + si.logger.WithFields(logrus.Fields{ + "vus": numVUs, "iterations": iterations, "maxDuration": duration, "type": si.config.GetType(), + }).Debug("Starting executor run...") + + totalIters := uint64(iterations) + doneIters := new(uint64) + vusFmt := pb.GetFixedLengthIntFormat(numVUs) + itersFmt := pb.GetFixedLengthIntFormat(int64(totalIters)) + progressFn := func() (float64, []string) { + spent := time.Since(startTime) + progVUs := fmt.Sprintf(vusFmt+" VUs", numVUs) + currentDoneIters := atomic.LoadUint64(doneIters) + progIters := fmt.Sprintf(itersFmt+"/"+itersFmt+" shared iters", + currentDoneIters, totalIters) + spentDuration := pb.GetFixedLengthDuration(spent, duration) + progDur := fmt.Sprintf("%s/%s", spentDuration, duration) + right := []string{progVUs, progDur, progIters} + + return float64(currentDoneIters) / float64(totalIters), right + } + si.progress.Modify(pb.WithProgress(progressFn)) + go trackProgress(parentCtx, maxDurationCtx, regDurationCtx, &si, progressFn) + + var attemptedIters uint64 + + // Actually schedule the VUs and iterations... 
+ activeVUs := &sync.WaitGroup{} + defer func() { + activeVUs.Wait() + if attemptedIters < totalIters { + stats.PushIfNotDone(parentCtx, out, stats.Sample{ + Value: float64(totalIters - attemptedIters), Metric: metrics.DroppedIterations, + Tags: si.getMetricTags(nil), Time: time.Now(), + }) + } + }() + + regDurationDone := regDurationCtx.Done() + runIteration := getIterationRunner(si.executionState, si.logger) + + activationParams := getVUActivationParams(maxDurationCtx, si.config.BaseConfig, + func(u lib.InitializedVU) { + si.executionState.ReturnVU(u, true) + activeVUs.Done() + }) + handleVU := func(initVU lib.InitializedVU) { + ctx, cancel := context.WithCancel(maxDurationCtx) + defer cancel() + + newParams := *activationParams + newParams.RunContext = ctx + + activeVU := initVU.Activate(&newParams) + + for { + select { + case <-regDurationDone: + return // don't make more iterations + default: + // continue looping + } + + attemptedIterNumber := atomic.AddUint64(&attemptedIters, 1) + if attemptedIterNumber > totalIters { + return + } + + runIteration(maxDurationCtx, activeVU) + atomic.AddUint64(doneIters, 1) + } + } + + for i := int64(0); i < numVUs; i++ { + initVU, err := si.executionState.GetPlannedVU(si.logger, true) + if err != nil { + cancel() + return err + } + activeVUs.Add(1) + go handleVU(initVU) + } + + return nil +} diff --git a/lib/executor/shared_iterations_test.go b/lib/executor/shared_iterations_test.go new file mode 100644 index 00000000000..65f0d73bc1c --- /dev/null +++ b/lib/executor/shared_iterations_test.go @@ -0,0 +1,143 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package executor + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/metrics" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" +) + +func getTestSharedIterationsConfig() SharedIterationsConfig { + return SharedIterationsConfig{ + VUs: null.IntFrom(10), + Iterations: null.IntFrom(100), + MaxDuration: types.NullDurationFrom(5 * time.Second), + } +} + +// Baseline test +func TestSharedIterationsRun(t *testing.T) { + t.Parallel() + var doneIters uint64 + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + var ctx, cancel, executor, _ = setupExecutor( + t, getTestSharedIterationsConfig(), es, + simpleRunner(func(ctx context.Context) error { + atomic.AddUint64(&doneIters, 1) + return nil + }), + ) + defer cancel() + err = executor.Run(ctx, nil) + require.NoError(t, err) + assert.Equal(t, uint64(100), doneIters) +} + +// Test that when one VU "slows down", others will pick up the workload. +// This is the reverse behavior of the PerVUIterations executor. 
+func TestSharedIterationsRunVariableVU(t *testing.T) { + t.Parallel() + var ( + result sync.Map + slowVUID int64 + ) + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + var ctx, cancel, executor, _ = setupExecutor( + t, getTestSharedIterationsConfig(), es, + simpleRunner(func(ctx context.Context) error { + time.Sleep(10 * time.Millisecond) // small wait to stabilize the test + state := lib.GetState(ctx) + // Pick one VU randomly and always slow it down. + sid := atomic.LoadInt64(&slowVUID) + if sid == int64(0) { + atomic.StoreInt64(&slowVUID, state.Vu) + } + if sid == state.Vu { + time.Sleep(200 * time.Millisecond) + } + currIter, _ := result.LoadOrStore(state.Vu, uint64(0)) + result.Store(state.Vu, currIter.(uint64)+1) + return nil + }), + ) + defer cancel() + err = executor.Run(ctx, nil) + require.NoError(t, err) + + var totalIters uint64 + result.Range(func(key, value interface{}) bool { + totalIters += value.(uint64) + return true + }) + + // The slow VU should complete 2 iterations given these timings, + // while the rest should randomly complete the other 98 iterations. 
+ val, ok := result.Load(slowVUID) + assert.True(t, ok) + assert.Equal(t, uint64(2), val) + assert.Equal(t, uint64(100), totalIters) +} + +func TestSharedIterationsEmitDroppedIterations(t *testing.T) { + t.Parallel() + var count int64 + et, err := lib.NewExecutionTuple(nil, nil) + require.NoError(t, err) + + config := &SharedIterationsConfig{ + VUs: null.IntFrom(5), + Iterations: null.IntFrom(100), + MaxDuration: types.NullDurationFrom(1 * time.Second), + } + + es := lib.NewExecutionState(lib.Options{}, et, 10, 50) + ctx, cancel, executor, logHook := setupExecutor( + t, config, es, + simpleRunner(func(ctx context.Context) error { + atomic.AddInt64(&count, 1) + <-ctx.Done() + return nil + }), + ) + defer cancel() + engineOut := make(chan stats.SampleContainer, 1000) + err = executor.Run(ctx, engineOut) + require.NoError(t, err) + assert.Empty(t, logHook.Drain()) + assert.Equal(t, int64(5), count) + assert.Equal(t, float64(95), sumMetricValues(engineOut, metrics.DroppedIterations.Name)) +} diff --git a/lib/executor/vu_handle.go b/lib/executor/vu_handle.go new file mode 100644 index 00000000000..ef894b13bd8 --- /dev/null +++ b/lib/executor/vu_handle.go @@ -0,0 +1,279 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package executor + +import ( + "context" + "sync" + "sync/atomic" + + "github.com/sirupsen/logrus" + + "github.com/loadimpact/k6/lib" +) + +type stateType int32 + +// states +const ( + stopped stateType = iota + starting + running + toGracefulStop + toHardStop +) + +/* +the below is a state transition table (https://en.wikipedia.org/wiki/State-transition_table) +short names for input: +- start is the method start +- loop is a loop of runLoopsIfPossible +- grace is the method gracefulStop +- hard is the method hardStop ++-------+-------------------------------------+---------------------------------------------------+ +| input | current | next state | notes | ++-------+-------------------------------------+---------------------------------------------------+ +| start | stopped | starting | normal | +| start | starting | starting | nothing | +| start | running | running | nothing | +| start | toGracefulStop | running | we raced with the loop stopping, just continue | +| start | toHardStop | starting | same as stopped really | +| loop | stopped | stopped | we actually are blocked on canStartIter | +| loop | starting | running | get new VU and context | +| loop | running | running | usually fast path | +| loop | toGracefulStop | stopped | cancel the context and make new one | +| loop | toHardStop | stopped | cancel the context and make new one | +| grace | stopped | stopped | nothing | +| grace | starting | stopped | cancel the context to return the VU | +| grace | running | toGracefulStop | normal one, the actual work is in the loop | +| grace | toGracefulStop | toGracefulStop | nothing | +| grace | toHardSTop | toHardStop | nothing | +| hard | stopped | stopped | nothing | +| hard | starting | stopped | short circuit as in the grace case, not necessary | +| hard | running | toHardStop | normal, cancel context and reinitialize it | +| hard | toGracefulStop | toHardStop | normal, cancel context and reinitialize it | +| hard | toHardStop | toHardStop | 
nothing | ++-------+-----------------+-------------------+----------------------------------------------------+ +*/ + +// This is a helper type used in executors where we have to dynamically control +// the number of VUs that are simultaneously running. For the moment, it is used +// in the RampingVUs and the ExternallyControlled executors. +// Notes on the implementation requirements: +// - it needs to be able to start and stop VUs in thread safe fashion +// - for each call to getVU there must be 1 (and only 1) call to returnVU +// - gracefulStop must let an iteration which has started to finish. For reasons of ease of +// implementation and lack of good evidence it's not required to let a not started iteration to +// finish in other words if you call start and then gracefulStop, there is no requirement for +// 1 iteration to have been started. +// - hardStop must stop an iteration in process +// - it's not required but preferable, if where possible to not reactivate VUs and to reuse context +// as this speed ups the execution +type vuHandle struct { + mutex *sync.Mutex + parentCtx context.Context + getVU func() (lib.InitializedVU, error) + returnVU func(lib.InitializedVU) + config *BaseConfig + + initVU lib.InitializedVU + activeVU lib.ActiveVU + canStartIter chan struct{} + + state stateType // see the table above for meanings + // stateH []int32 // helper for debugging + + ctx context.Context + cancel func() + logger *logrus.Entry +} + +func newStoppedVUHandle( + parentCtx context.Context, getVU func() (lib.InitializedVU, error), + returnVU func(lib.InitializedVU), config *BaseConfig, logger *logrus.Entry, +) *vuHandle { + ctx, cancel := context.WithCancel(parentCtx) + + return &vuHandle{ + mutex: &sync.Mutex{}, + parentCtx: parentCtx, + getVU: getVU, + config: config, + + canStartIter: make(chan struct{}), + state: stopped, + + ctx: ctx, + cancel: cancel, + logger: logger, + returnVU: returnVU, + } +} + +func (vh *vuHandle) start() (err error) { + 
vh.mutex.Lock() + defer vh.mutex.Unlock() + + switch vh.state { + case starting, running: + return nil // nothing to do + case toGracefulStop: // we raced with the loop, lets not return the vu just to get it back + vh.logger.Debug("Start") + close(vh.canStartIter) + vh.changeState(running) + case stopped, toHardStop: // we need to reactivate the VU and remake the context for it + vh.logger.Debug("Start") + vh.initVU, err = vh.getVU() + if err != nil { + return err + } + + vh.activeVU = vh.initVU.Activate(getVUActivationParams(vh.ctx, *vh.config, vh.returnVU)) + close(vh.canStartIter) + vh.changeState(starting) + } + return nil +} + +// just a helper function for debugging +func (vh *vuHandle) changeState(newState stateType) { + // vh.stateH = append(vh.stateH, newState) + atomic.StoreInt32((*int32)(&vh.state), int32(newState)) +} + +func (vh *vuHandle) gracefulStop() { + vh.mutex.Lock() + defer vh.mutex.Unlock() + switch vh.state { + case toGracefulStop, toHardStop, stopped: + return // nothing to do + case starting: // we raced with the loop and apparently it won't do a single iteration + vh.cancel() + vh.ctx, vh.cancel = context.WithCancel(vh.parentCtx) + vh.changeState(stopped) + case running: + vh.changeState(toGracefulStop) + } + + vh.logger.Debug("Graceful stop") + vh.canStartIter = make(chan struct{}) +} + +func (vh *vuHandle) hardStop() { + vh.mutex.Lock() + defer vh.mutex.Unlock() + + switch vh.state { + case toHardStop, stopped: + return // nothing to do + case starting: // we raced with the loop and apparently it won't do a single iteration + vh.changeState(stopped) + case running, toGracefulStop: + vh.changeState(toHardStop) + } + vh.logger.Debug("Hard stop") + vh.cancel() + vh.ctx, vh.cancel = context.WithCancel(vh.parentCtx) + vh.canStartIter = make(chan struct{}) +} + +// runLoopsIfPossible is where all the fun is :D. Unfortunately somewhere we need to check most +// of the cases and this is where this happens. 
+func (vh *vuHandle) runLoopsIfPossible(runIter func(context.Context, lib.ActiveVU) bool) { + // We can probably initialize here, but it's also easier to just use the slow path in the second + // part of the for loop + defer func() { + // not sure if this is needed, because here the parentCtx is canceled and I would argue it doesn't matter + // if we set the correct state + vh.mutex.Lock() + vh.changeState(stopped) + vh.mutex.Unlock() + }() + + var ( + executorDone = vh.parentCtx.Done() + ctx context.Context + cancel func() + vu lib.ActiveVU + ) + + for { + state := stateType(atomic.LoadInt32((*int32)(&vh.state))) + if state == running && runIter(ctx, vu) { // fast path + continue + } + + // slow path - something has changed - get what and wait until we can do more iterations + vh.mutex.Lock() + select { + case <-executorDone: + // The whole executor is done, nothing more to do. + vh.mutex.Unlock() + return + default: + } + + switch vh.state { + case running: // start raced us toGracefulStop + vh.mutex.Unlock() + continue + case toGracefulStop: + if cancel != nil { + // we need to cancel the context, to return the vu + // and because *we* did, lets reinitialize it + cancel() + vh.ctx, vh.cancel = context.WithCancel(vh.parentCtx) + } + fallthrough // to set the state + case toHardStop: + // we have *now* stopped + vh.changeState(stopped) + case stopped, starting: + // there is nothing to do + } + + canStartIter := vh.canStartIter + ctx = vh.ctx + vh.mutex.Unlock() + + // We're are stopped, but the executor isn't done yet, so we wait + // for either one of those conditions. + select { + case <-canStartIter: // we can start again + vh.mutex.Lock() + select { + case <-vh.canStartIter: // we check again in case of race + // reinitialize + vu, ctx, cancel = vh.activeVU, vh.ctx, vh.cancel + vh.changeState(running) + default: + // well we got raced to here by something ... loop again ... 
+ } + vh.mutex.Unlock() + case <-ctx.Done(): + // hardStop was called, start a fresh iteration to get the new + // context and signal channel + case <-executorDone: + // The whole executor is done, nothing more to do. + return + } + } +} diff --git a/lib/executor/vu_handle_test.go b/lib/executor/vu_handle_test.go new file mode 100644 index 00000000000..0198a265776 --- /dev/null +++ b/lib/executor/vu_handle_test.go @@ -0,0 +1,418 @@ +package executor + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/lib/testutils" + "github.com/loadimpact/k6/lib/testutils/minirunner" + "github.com/loadimpact/k6/stats" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// this test is mostly interesting when -race is enabled +func TestVUHandleRace(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.DebugLevel}} + testLog := logrus.New() + testLog.AddHook(logHook) + testLog.SetOutput(testutils.NewTestOutput(t)) + // testLog.Level = logrus.DebugLevel + logEntry := logrus.NewEntry(testLog) + + var getVUCount int64 + var returnVUCount int64 + getVU := func() (lib.InitializedVU, error) { + atomic.AddInt64(&getVUCount, 1) + return &minirunner.VU{ + R: &minirunner.MiniRunner{ + Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + // TODO: do something + return nil + }, + }, + }, nil + } + + returnVU := func(_ lib.InitializedVU) { + atomic.AddInt64(&returnVUCount, 1) + // do something + } + var interruptedIter int64 + var fullIterations int64 + + runIter := func(ctx context.Context, vu lib.ActiveVU) bool { + _ = vu.RunOnce() + select { + case <-ctx.Done(): + // Don't log errors or emit iterations metrics from cancelled iterations + atomic.AddInt64(&interruptedIter, 1) + return false + default: + 
atomic.AddInt64(&fullIterations, 1) + return true + } + } + + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, &BaseConfig{}, logEntry) + go vuHandle.runLoopsIfPossible(runIter) + var wg sync.WaitGroup + wg.Add(3) + go func() { + defer wg.Done() + for i := 0; i < 10000; i++ { + err := vuHandle.start() + require.NoError(t, err) + } + }() + + go func() { + defer wg.Done() + for i := 0; i < 1000; i++ { + vuHandle.gracefulStop() + time.Sleep(1 * time.Nanosecond) + } + }() + + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + vuHandle.hardStop() + time.Sleep(10 * time.Nanosecond) + } + }() + wg.Wait() + vuHandle.hardStop() // STOP it + time.Sleep(time.Millisecond * 50) + interruptedBefore := atomic.LoadInt64(&interruptedIter) + fullBefore := atomic.LoadInt64(&fullIterations) + _ = vuHandle.start() + time.Sleep(time.Millisecond * 50) // just to be sure an iteration will squeeze in + cancel() + time.Sleep(time.Millisecond * 50) + interruptedAfter := atomic.LoadInt64(&interruptedIter) + fullAfter := atomic.LoadInt64(&fullIterations) + assert.True(t, interruptedBefore >= interruptedAfter-1, + "too big of a difference %d >= %d - 1", interruptedBefore, interruptedAfter) + assert.True(t, fullBefore+1 <= fullAfter, + "too small of a difference %d + 1 <= %d", fullBefore, fullAfter) + require.Equal(t, atomic.LoadInt64(&getVUCount), atomic.LoadInt64(&returnVUCount)) +} + +// this test is mostly interesting when -race is enabled +func TestVUHandleStartStopRace(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.DebugLevel}} + testLog := logrus.New() + testLog.AddHook(logHook) + testLog.SetOutput(testutils.NewTestOutput(t)) + // testLog.Level = logrus.DebugLevel + logEntry := logrus.NewEntry(testLog) + + var vuID int64 = -1 + + var testIterations = 10000 + returned := make(chan struct{}) + getVU := func() (lib.InitializedVU, error) { + 
returned = make(chan struct{}) + return &minirunner.VU{ + ID: atomic.AddInt64(&vuID, 1), + R: &minirunner.MiniRunner{ + Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + // TODO: do something + return nil + }, + }, + }, nil + } + + returnVU := func(v lib.InitializedVU) { + require.Equal(t, atomic.LoadInt64(&vuID), v.(*minirunner.VU).ID) + close(returned) + } + var interruptedIter int64 + var fullIterations int64 + + runIter := func(ctx context.Context, vu lib.ActiveVU) bool { + _ = vu.RunOnce() + select { + case <-ctx.Done(): + // Don't log errors or emit iterations metrics from cancelled iterations + atomic.AddInt64(&interruptedIter, 1) + return false + default: + atomic.AddInt64(&fullIterations, 1) + return true + } + } + + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, &BaseConfig{}, logEntry) + go vuHandle.runLoopsIfPossible(runIter) + for i := 0; i < testIterations; i++ { + err := vuHandle.start() + vuHandle.gracefulStop() + require.NoError(t, err) + select { + case <-returned: + case <-time.After(100 * time.Millisecond): + go panic("returning took too long") + time.Sleep(time.Second) + } + } + + vuHandle.hardStop() // STOP it + time.Sleep(time.Millisecond * 5) + interruptedBefore := atomic.LoadInt64(&interruptedIter) + fullBefore := atomic.LoadInt64(&fullIterations) + _ = vuHandle.start() + time.Sleep(time.Millisecond * 50) // just to be sure an iteration will squeeze in + cancel() + time.Sleep(time.Millisecond * 5) + interruptedAfter := atomic.LoadInt64(&interruptedIter) + fullAfter := atomic.LoadInt64(&fullIterations) + assert.True(t, interruptedBefore >= interruptedAfter-1, + "too big of a difference %d >= %d - 1", interruptedBefore, interruptedAfter) + assert.True(t, fullBefore+1 <= fullAfter, + "too small of a difference %d + 1 <= %d", fullBefore, fullAfter) +} + +func TestVUHandleSimple(t *testing.T) { + t.Parallel() + + logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.DebugLevel}} + testLog := 
logrus.New() + testLog.AddHook(logHook) + testLog.SetOutput(testutils.NewTestOutput(t)) + // testLog.Level = logrus.DebugLevel + logEntry := logrus.NewEntry(testLog) + + var ( + getVUCount uint32 + returnVUCount uint32 + interruptedIter int64 + fullIterations int64 + ) + reset := func() { + getVUCount = 0 + returnVUCount = 0 + interruptedIter = 0 + fullIterations = 0 + } + + getVU := func() (lib.InitializedVU, error) { //nolint:unparam + atomic.AddUint32(&getVUCount, 1) + + return &minirunner.VU{ + R: &minirunner.MiniRunner{ + Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + // TODO: do something + return nil + }, + }, + }, nil + } + + returnVU := func(_ lib.InitializedVU) { + atomic.AddUint32(&returnVUCount, 1) + } + + runIter := func(ctx context.Context, _ lib.ActiveVU) bool { + select { + case <-time.After(time.Second): + case <-ctx.Done(): + } + + select { + case <-ctx.Done(): + // Don't log errors or emit iterations metrics from cancelled iterations + atomic.AddInt64(&interruptedIter, 1) + return false + default: + atomic.AddInt64(&fullIterations, 1) + return true + } + } + t.Run("start before gracefulStop finishes", func(t *testing.T) { + reset() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, &BaseConfig{}, logEntry) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + vuHandle.runLoopsIfPossible(runIter) + }() + err := vuHandle.start() + require.NoError(t, err) + time.Sleep(time.Millisecond * 5) + vuHandle.gracefulStop() + time.Sleep(time.Millisecond * 5) + err = vuHandle.start() + require.NoError(t, err) + time.Sleep(time.Millisecond * 1500) + assert.EqualValues(t, 1, atomic.LoadUint32(&getVUCount)) + assert.EqualValues(t, 0, atomic.LoadUint32(&returnVUCount)) + assert.EqualValues(t, 0, atomic.LoadInt64(&interruptedIter)) + assert.EqualValues(t, 1, atomic.LoadInt64(&fullIterations)) + cancel() + wg.Wait() + time.Sleep(time.Millisecond 
* 5) + assert.EqualValues(t, 1, atomic.LoadUint32(&getVUCount)) + assert.EqualValues(t, 1, atomic.LoadUint32(&returnVUCount)) + assert.EqualValues(t, 1, atomic.LoadInt64(&interruptedIter)) + assert.EqualValues(t, 1, atomic.LoadInt64(&fullIterations)) + }) + + t.Run("start after gracefulStop finishes", func(t *testing.T) { + reset() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, &BaseConfig{}, logEntry) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + vuHandle.runLoopsIfPossible(runIter) + }() + err := vuHandle.start() + require.NoError(t, err) + time.Sleep(time.Millisecond * 50) + vuHandle.gracefulStop() + time.Sleep(time.Millisecond * 1500) + assert.EqualValues(t, 1, atomic.LoadUint32(&getVUCount)) + assert.EqualValues(t, 1, atomic.LoadUint32(&returnVUCount)) + assert.EqualValues(t, 0, atomic.LoadInt64(&interruptedIter)) + assert.EqualValues(t, 1, atomic.LoadInt64(&fullIterations)) + err = vuHandle.start() + require.NoError(t, err) + time.Sleep(time.Millisecond * 1500) + cancel() + wg.Wait() + + time.Sleep(time.Millisecond * 50) + assert.EqualValues(t, 2, atomic.LoadUint32(&getVUCount)) + assert.EqualValues(t, 2, atomic.LoadUint32(&returnVUCount)) + assert.EqualValues(t, 1, atomic.LoadInt64(&interruptedIter)) + assert.EqualValues(t, 2, atomic.LoadInt64(&fullIterations)) + }) + + t.Run("start after hardStop", func(t *testing.T) { + reset() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, &BaseConfig{}, logEntry) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + vuHandle.runLoopsIfPossible(runIter) + }() + err := vuHandle.start() + require.NoError(t, err) + time.Sleep(time.Millisecond * 5) + vuHandle.hardStop() + time.Sleep(time.Millisecond * 15) + assert.EqualValues(t, 1, atomic.LoadUint32(&getVUCount)) + assert.EqualValues(t, 1, 
atomic.LoadUint32(&returnVUCount)) + assert.EqualValues(t, 1, atomic.LoadInt64(&interruptedIter)) + assert.EqualValues(t, 0, atomic.LoadInt64(&fullIterations)) + err = vuHandle.start() + require.NoError(t, err) + time.Sleep(time.Millisecond * 1500) + cancel() + wg.Wait() + + time.Sleep(time.Millisecond * 5) + assert.EqualValues(t, 2, atomic.LoadUint32(&getVUCount)) + assert.EqualValues(t, 2, atomic.LoadUint32(&returnVUCount)) + assert.EqualValues(t, 2, atomic.LoadInt64(&interruptedIter)) + assert.EqualValues(t, 1, atomic.LoadInt64(&fullIterations)) + }) +} + +func BenchmarkVUHandleIterations(b *testing.B) { + logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.DebugLevel}} + testLog := logrus.New() + testLog.AddHook(logHook) + // testLog.Level = logrus.DebugLevel + logEntry := logrus.NewEntry(testLog) + + var ( + getVUCount uint32 + returnVUCount uint32 + interruptedIter int64 + fullIterations int64 + ) + reset := func() { + getVUCount = 0 + returnVUCount = 0 + interruptedIter = 0 + fullIterations = 0 + } + + getVU := func() (lib.InitializedVU, error) { + atomic.AddUint32(&getVUCount, 1) + + return &minirunner.VU{ + R: &minirunner.MiniRunner{ + Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error { + // TODO: do something + return nil + }, + }, + }, nil + } + + returnVU := func(_ lib.InitializedVU) { + atomic.AddUint32(&returnVUCount, 1) + } + + runIter := func(ctx context.Context, _ lib.ActiveVU) bool { + // Do nothing + select { + case <-ctx.Done(): + // Don't log errors or emit iterations metrics from cancelled iterations + atomic.AddInt64(&interruptedIter, 1) + return false + default: + atomic.AddInt64(&fullIterations, 1) + return true + } + } + + reset() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vuHandle := newStoppedVUHandle(ctx, getVU, returnVU, &BaseConfig{}, logEntry) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + vuHandle.runLoopsIfPossible(runIter) + }() + 
start := time.Now() + b.ResetTimer() + err := vuHandle.start() + require.NoError(b, err) + time.Sleep(time.Second) + cancel() + wg.Wait() + b.StopTimer() + took := time.Since(start) + b.ReportMetric(float64(atomic.LoadInt64(&fullIterations))/float64(took), "iterations/ns") +} diff --git a/lib/executors.go b/lib/executors.go new file mode 100644 index 00000000000..2912c1d58b3 --- /dev/null +++ b/lib/executors.go @@ -0,0 +1,346 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package lib + +import ( + "context" + "encoding/json" + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/sirupsen/logrus" + + "github.com/loadimpact/k6/stats" + "github.com/loadimpact/k6/ui/pb" +) + +// TODO: remove globals and use some type of explicit dependency injection? +//nolint:gochecknoglobals +var ( + executorConfigTypesMutex sync.RWMutex + executorConfigConstructors = make(map[string]ExecutorConfigConstructor) +) + +// ExecutionStep is used by different executors to specify the planned number of +// VUs they will need at a particular time. The times are relative to their +// StartTime, i.e. they don't take into account the specific starting time of +// the executor, as that will be considered by the external execution executor +// separately. 
+// +// A slice [{t1, v1}, {t2, v2}, {t3, v3}, ..., {tn, vn}] of execution steps +// means that an executor will need 0 VUs until t1, it will need v1 number of +// VUs from time t1 until t2, need v2 number of VUs from time t2 to t3, and so +// on. t1 is usually 0, tn is usually the same as GetMaxDuration() and vn is +// usually 0. +// +// Keep in mind that t(i) may be exactly equal to t(i+i), when there's an abrupt +// transition in the number of VUs required by an executor. For example, the +// ramping-vus executor may have 0-duration stages, or it may scale up +// VUs in its last stage right until the end. These immediate transitions cannot +// be ignored, since the gracefulStop/gracefulRampDown options potentially allow +// any started iterations to finish. +// +// []ExecutionStep is also used by the ScenarioConfigs, to represent the +// amount of needed VUs among all executors, during the whole execution of a +// test script. In that context, each executor's StartTime is accounted for and +// included in the offsets. +type ExecutionStep struct { + TimeOffset time.Duration + PlannedVUs uint64 + MaxUnplannedVUs uint64 +} + +// TODO: make []ExecutionStep or []ExecutorConfig their own type? + +// ExecutorConfig is an interface that should be implemented by all executor config types +type ExecutorConfig interface { + Validate() []error + + GetName() string + GetType() string + GetStartTime() time.Duration + GetGracefulStop() time.Duration + + // This is used to validate whether a particular script can run in the cloud + // or, in the future, in the native k6 distributed execution. Currently only + // the externally-controlled executor should return false. + IsDistributable() bool + + GetEnv() map[string]string + // Allows us to get the non-default function the executor should run, if it + // has been specified. + // + // TODO: use interface{} so plain http requests can be specified? 
+ GetExec() string + GetTags() map[string]string + + // Calculates the VU requirements in different stages of the executor's + // execution, including any extensions caused by waiting for iterations to + // finish with graceful stops or ramp-downs. + GetExecutionRequirements(*ExecutionTuple) []ExecutionStep + + // Return a human-readable description of the executor + GetDescription(*ExecutionTuple) string + + NewExecutor(*ExecutionState, *logrus.Entry) (Executor, error) + + // HasWork reports whether there is any work for the executor to do with a given segment. + HasWork(*ExecutionTuple) bool +} + +// InitVUFunc is just a shorthand so we don't have to type the function +// signature every time. +type InitVUFunc func(context.Context, *logrus.Entry) (InitializedVU, error) + +// Executor is the interface all executors should implement +type Executor interface { + GetConfig() ExecutorConfig + GetProgress() *pb.ProgressBar + GetLogger() *logrus.Entry + + Init(ctx context.Context) error + Run(ctx context.Context, engineOut chan<- stats.SampleContainer) error +} + +// PausableExecutor should be implemented by the executors that can be paused +// and resumed in the middle of the test execution. Currently, only the +// externally controlled executor implements it. +type PausableExecutor interface { + SetPaused(bool) error +} + +// LiveUpdatableExecutor should be implemented for the executors whose +// configuration can be modified in the middle of the test execution. Currently, +// only the manual execution executor implements it. 
+type LiveUpdatableExecutor interface { + UpdateConfig(ctx context.Context, newConfig interface{}) error +} + +// ExecutorConfigConstructor is a simple function that returns a concrete +// Config instance with the specified name and all default values correctly +// initialized +type ExecutorConfigConstructor func(name string, rawJSON []byte) (ExecutorConfig, error) + +// RegisterExecutorConfigType adds the supplied ExecutorConfigConstructor as +// the constructor for its type in the configConstructors map, in a thread-safe +// manner +func RegisterExecutorConfigType(configType string, constructor ExecutorConfigConstructor) { + executorConfigTypesMutex.Lock() + defer executorConfigTypesMutex.Unlock() + + if constructor == nil { + panic("executor configs: constructor is nil") + } + if _, configTypeExists := executorConfigConstructors[configType]; configTypeExists { + panic("executor configs: lib.RegisterExecutorConfigType called twice for " + configType) + } + + executorConfigConstructors[configType] = constructor +} + +// ScenarioConfigs can contain mixed executor config types +type ScenarioConfigs map[string]ExecutorConfig + +// UnmarshalJSON implements the json.Unmarshaler interface in a two-step manner, +// creating the correct type of configs based on the `type` property. +func (scs *ScenarioConfigs) UnmarshalJSON(data []byte) error { + if len(data) == 0 { + return nil + } + + if len(data) == 4 && string(data) == "null" { + return nil + } + + // TODO: use a more sophisticated combination of dec.Token() and dec.More(), + // which would allow us to support both arrays and maps for this config? 
+ var protoConfigs map[string]protoExecutorConfig + if err := StrictJSONUnmarshal(data, &protoConfigs); err != nil { + return err + } + + result := make(ScenarioConfigs, len(protoConfigs)) + for k, v := range protoConfigs { + if v.executorType == "" { + return fmt.Errorf("scenario '%s' doesn't have a specified executor type", k) + } + config, err := GetParsedExecutorConfig(k, v.executorType, v.rawJSON) + if err != nil { + return err + } + result[k] = config + } + + *scs = result + + return nil +} + +// Validate checks if all of the specified executor options make sense +func (scs ScenarioConfigs) Validate() (errors []error) { + for name, exec := range scs { + if execErr := exec.Validate(); len(execErr) != 0 { + errors = append(errors, + fmt.Errorf("scenario %s has configuration errors: %s", name, ConcatErrors(execErr, ", "))) + } + } + return errors +} + +// GetSortedConfigs returns a slice with the executor configurations, +// sorted in a consistent and predictable manner. It is useful when we want or +// have to avoid using maps with string keys (and tons of string lookups in +// them) and avoid the unpredictable iterations over Go maps. Slices allow us +// constant-time lookups and ordered iterations. +// +// The configs in the returned slice will be sorted by their start times in an +// ascending order, and alphabetically by their names (which are unique) if +// there are ties. 
+func (scs ScenarioConfigs) GetSortedConfigs() []ExecutorConfig { + configs := make([]ExecutorConfig, len(scs)) + + // Populate the configs slice with sorted executor configs + i := 0 + for _, config := range scs { + configs[i] = config // populate the slice in an unordered manner + i++ + } + sort.Slice(configs, func(a, b int) bool { // sort by (start time, name) + switch { + case configs[a].GetStartTime() < configs[b].GetStartTime(): + return true + case configs[a].GetStartTime() == configs[b].GetStartTime(): + return strings.Compare(configs[a].GetName(), configs[b].GetName()) < 0 + default: + return false + } + }) + + return configs +} + +// GetFullExecutionRequirements combines the execution requirements from all of +// the configured executors. It takes into account their start times and their +// individual VU requirements and calculates the total VU requirements for each +// moment in the test execution. +func (scs ScenarioConfigs) GetFullExecutionRequirements(et *ExecutionTuple) []ExecutionStep { + sortedConfigs := scs.GetSortedConfigs() + + // Combine the steps and requirements from all different executors, and + // sort them by their time offset, counting the executors' startTimes as + // well. + type trackedStep struct { + ExecutionStep + configID int + } + trackedSteps := []trackedStep{} + for configID, config := range sortedConfigs { // orderly iteration over a slice + configStartTime := config.GetStartTime() + configSteps := config.GetExecutionRequirements(et) + for _, cs := range configSteps { + cs.TimeOffset += configStartTime // add the executor start time to the step time offset + trackedSteps = append(trackedSteps, trackedStep{cs, configID}) + } + } + // Sort by (time offset, config id). It's important that we use stable + // sorting algorithm, since there could be steps with the same time from + // the same executor and their order is important. 
+ sort.SliceStable(trackedSteps, func(a, b int) bool { + if trackedSteps[a].TimeOffset == trackedSteps[b].TimeOffset { + return trackedSteps[a].configID < trackedSteps[b].configID + } + + return trackedSteps[a].TimeOffset < trackedSteps[b].TimeOffset + }) + + // Go through all of the sorted steps from all of the executors, and + // build a new list of execution steps that consolidates all of their + // requirements. If multiple executors have an execution step at exactly + // the same time offset, they will be combined into a single new execution + // step with the sum of the values from the previous ones. + currentTimeOffset := time.Duration(0) + currentPlannedVUs := make([]uint64, len(scs)) + currentMaxUnplannedVUs := make([]uint64, len(scs)) + sum := func(data []uint64) (result uint64) { // sigh... + for _, val := range data { + result += val + } + return result + } + consolidatedSteps := []ExecutionStep{} + addCurrentStepIfDifferent := func() { + newPlannedVUs := sum(currentPlannedVUs) + newMaxUnplannedVUs := sum(currentMaxUnplannedVUs) + stepsLen := len(consolidatedSteps) + if stepsLen == 0 || + consolidatedSteps[stepsLen-1].PlannedVUs != newPlannedVUs || + consolidatedSteps[stepsLen-1].MaxUnplannedVUs != newMaxUnplannedVUs { + consolidatedSteps = append(consolidatedSteps, ExecutionStep{ + TimeOffset: currentTimeOffset, + PlannedVUs: newPlannedVUs, + MaxUnplannedVUs: newMaxUnplannedVUs, + }) + } + } + for _, step := range trackedSteps { + // TODO: optimize by skipping some steps + // If the time offset is different, create a new step with the current values + + currentTimeOffset = step.TimeOffset + currentPlannedVUs[step.configID] = step.PlannedVUs + currentMaxUnplannedVUs[step.configID] = step.MaxUnplannedVUs + addCurrentStepIfDifferent() + } + return consolidatedSteps +} + +// GetParsedExecutorConfig returns a struct instance corresponding to the supplied +// config type. 
It will be fully initialized - with both the default values of +// the type, as well as with whatever the user had specified in the JSON +func GetParsedExecutorConfig(name, configType string, rawJSON []byte) (result ExecutorConfig, err error) { + executorConfigTypesMutex.Lock() + defer executorConfigTypesMutex.Unlock() + + constructor, exists := executorConfigConstructors[configType] + if !exists { + return nil, fmt.Errorf("unknown executor type '%s'", configType) + } + return constructor(name, rawJSON) +} + +type protoExecutorConfig struct { + executorType string + rawJSON json.RawMessage +} + +// UnmarshalJSON unmarshals the base config (to get the type), but it also +// stores the unprocessed JSON so we can parse the full config in the next step +func (pc *protoExecutorConfig) UnmarshalJSON(b []byte) error { + var tmp struct { + ExecutorType string `json:"executor"` + } + err := json.Unmarshal(b, &tmp) + *pc = protoExecutorConfig{tmp.ExecutorType, b} + return err +} diff --git a/lib/fsext/cacheonread.go b/lib/fsext/cacheonread.go index 9a534f3f664..7f57ddcdc22 100644 --- a/lib/fsext/cacheonread.go +++ b/lib/fsext/cacheonread.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package fsext import ( diff --git a/lib/fsext/changepathfs.go b/lib/fsext/changepathfs.go index 7f7a81d6f2f..473a92d2874 100644 --- a/lib/fsext/changepathfs.go +++ b/lib/fsext/changepathfs.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package fsext import ( diff --git a/lib/fsext/changepathfs_test.go b/lib/fsext/changepathfs_test.go index 5e91ea77f97..006da3f4cc6 100644 --- a/lib/fsext/changepathfs_test.go +++ b/lib/fsext/changepathfs_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package fsext import ( diff --git a/lib/fsext/trimpathseparator_test.go b/lib/fsext/trimpathseparator_test.go index 678b0c1b758..d5a2865e733 100644 --- a/lib/fsext/trimpathseparator_test.go +++ b/lib/fsext/trimpathseparator_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package fsext import ( diff --git a/lib/fsext/walk.go b/lib/fsext/walk.go index 2869f022c82..345561418e8 100644 --- a/lib/fsext/walk.go +++ b/lib/fsext/walk.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package fsext import ( diff --git a/lib/helpers.go b/lib/helpers.go new file mode 100644 index 00000000000..8d2e133528a --- /dev/null +++ b/lib/helpers.go @@ -0,0 +1,114 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package lib + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "time" +) + +// StrictJSONUnmarshal decodes a JSON in a strict manner, emitting an error if there +// are unknown fields or unexpected data +func StrictJSONUnmarshal(data []byte, v interface{}) error { + dec := json.NewDecoder(bytes.NewReader(data)) + dec.DisallowUnknownFields() + dec.UseNumber() + + if err := dec.Decode(&v); err != nil { + return err + } + if dec.More() { + //TODO: use a custom error? + return fmt.Errorf("unexpected data after the JSON object") + } + return nil +} + +// GetMaxPlannedVUs returns the maximum number of planned VUs at any stage of +// the execution plan. +func GetMaxPlannedVUs(steps []ExecutionStep) (result uint64) { + for _, s := range steps { + stepMaxPlannedVUs := s.PlannedVUs + if stepMaxPlannedVUs > result { + result = stepMaxPlannedVUs + } + } + return result +} + +// GetMaxPossibleVUs returns the maximum number of planned + unplanned (i.e. +// initialized mid-test) VUs at any stage of the execution plan. 
Unplanned VUs +// are possible in some executors, like the arrival-rate ones, as a way to have +// a low number of pre-allocated VUs, but be able to initialize new ones in the +// middle of the test, if needed. For example, if the remote system starts +// responding very slowly and all of the pre-allocated VUs are waiting for it. +// +// IMPORTANT 1: Getting planned and unplanned VUs separately for the whole +// duration of a test can often lead to mistakes. That's why this function is +// called GetMaxPossibleVUs() and why there is no GetMaxUnplannedVUs() function. +// +// As an example, imagine that you have an executor with MaxPlannedVUs=20 and +// MaxUnplannedVUs=0, followed immediately after by another executor with +// MaxPlannedVUs=10 and MaxUnplannedVUs=10. The MaxPlannedVUs number for the +// whole test is 20, and MaxUnplannedVUs is 10, but since those executors won't +// run concurrently, MaxVUs for the whole test is not 30, rather it's 20, since +// 20 VUs will be sufficient to run the whole test. +// +// IMPORTANT 2: this has one very important exception. The externally controlled +// executor doesn't use the MaxUnplannedVUs (i.e. this function will return 0), +// since their initialization and usage is directly controlled by the user and +// is effectively bounded only by the resources of the machine k6 is running on. +func GetMaxPossibleVUs(steps []ExecutionStep) (result uint64) { + for _, s := range steps { + stepMaxPossibleVUs := s.PlannedVUs + s.MaxUnplannedVUs + if stepMaxPossibleVUs > result { + result = stepMaxPossibleVUs + } + } + return result +} + +// GetEndOffset returns the time offset of the last step of the execution plan, +// and whether that step is a final one, i.e. whether the number of planned or +// unplanned is 0. 
+func GetEndOffset(steps []ExecutionStep) (lastStepOffset time.Duration, isFinal bool) { + if len(steps) == 0 { + return 0, true + } + lastStep := steps[len(steps)-1] + return lastStep.TimeOffset, (lastStep.PlannedVUs == 0 && lastStep.MaxUnplannedVUs == 0) +} + +// ConcatErrors is a helper function for joining error messages into a single +// string. +// +// TODO: use Go 2.0/xerrors style errors so we don't lose error type information and +// metadata. +func ConcatErrors(errors []error, separator string) string { + errStrings := make([]string, len(errors)) + for i, e := range errors { + errStrings[i] = e.Error() + } + return strings.Join(errStrings, separator) +} diff --git a/lib/scheduler/helpers_test.go b/lib/helpers_test.go similarity index 73% rename from lib/scheduler/helpers_test.go rename to lib/helpers_test.go index 8165f922fd6..7bcdf2ea8df 100644 --- a/lib/scheduler/helpers_test.go +++ b/lib/helpers_test.go @@ -18,7 +18,7 @@ * */ -package scheduler +package lib import ( "fmt" @@ -28,19 +28,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestCheckPercentagesSum(t *testing.T) { - t.Parallel() - assert.NoError(t, checkPercentagesSum([]float64{100})) - assert.NoError(t, checkPercentagesSum([]float64{50, 50})) - assert.NoError(t, checkPercentagesSum([]float64{100.0 / 3, 100.0 / 3, 100.0 / 3})) - assert.NoError(t, checkPercentagesSum([]float64{33.33, 33.33, 33.34})) - - assert.Error(t, checkPercentagesSum([]float64{})) - assert.Error(t, checkPercentagesSum([]float64{100 / 3, 100 / 3, 100 / 3})) - assert.Error(t, checkPercentagesSum([]float64{33.33, 33.33, 33.33})) - assert.Error(t, checkPercentagesSum([]float64{40, 40, 40})) -} - func TestStrictJSONUnmarshal(t *testing.T) { t.Parallel() type someElement struct { @@ -67,7 +54,7 @@ func TestStrictJSONUnmarshal(t *testing.T) { for i, tc := range testCases { tc := tc t.Run(fmt.Sprintf("TestCase#%d", i), func(t *testing.T) { - err := 
StrictJSONUnmarshal([]byte(tc.data), &tc.destination) if tc.expectedError { require.Error(t, err) return @@ -77,3 +64,5 @@ func TestStrictJSONUnmarshal(t *testing.T) { }) } } + +//TODO: test EventStream very thoroughly diff --git a/js/modules/k6/http/tls_go_1_12_test.go b/lib/lib.go similarity index 90% rename from js/modules/k6/http/tls_go_1_12_test.go rename to lib/lib.go index e248f84e96f..c1a664e7e5d 100644 --- a/js/modules/k6/http/tls_go_1_12_test.go +++ b/lib/lib.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * k6 - a next-generation load testing tool @@ -20,6 +18,5 @@ * */ -package http - -const expectedTLSVersion = "tls1.3" +// Package lib contains most interfaces and base structs of k6. +package lib diff --git a/lib/limiter.go b/lib/limiter.go index a99619b49a9..e1c3c072cab 100644 --- a/lib/limiter.go +++ b/lib/limiter.go @@ -65,6 +65,7 @@ type MultiSlotLimiter struct { } // NewMultiSlotLimiter initializes and returns a new MultiSlotLimiter with the given slot count +//TODO: move to lib and use something better than a mutex? sync.Map perhaps? func NewMultiSlotLimiter(slots int) *MultiSlotLimiter { return &MultiSlotLimiter{make(map[string]SlotLimiter), slots, sync.Mutex{}} } diff --git a/lib/metrics/metrics.go b/lib/metrics/metrics.go index d550d762bd1..4c0d025ca53 100644 --- a/lib/metrics/metrics.go +++ b/lib/metrics/metrics.go @@ -26,12 +26,14 @@ import ( //TODO: refactor this, using non thread-safe global variables seems like a bad idea for various reasons... +//nolint:gochecknoglobals var ( // Engine-emitted. VUs = stats.New("vus", stats.Gauge) VUsMax = stats.New("vus_max", stats.Gauge) Iterations = stats.New("iterations", stats.Counter) IterationDuration = stats.New("iteration_duration", stats.Trend, stats.Time) + DroppedIterations = stats.New("dropped_iterations", stats.Counter) Errors = stats.New("errors", stats.Counter) // Runner-emitted. 
@@ -52,7 +54,7 @@ var ( WSSessions = stats.New("ws_sessions", stats.Counter) WSMessagesSent = stats.New("ws_msgs_sent", stats.Counter) WSMessagesReceived = stats.New("ws_msgs_received", stats.Counter) - WSPing = stats.New("ws_ping", stats.Trend) + WSPing = stats.New("ws_ping", stats.Trend, stats.Time) WSSessionDuration = stats.New("ws_session_duration", stats.Trend, stats.Time) WSConnecting = stats.New("ws_connecting", stats.Trend, stats.Time) diff --git a/lib/models.go b/lib/models.go index 3944eeecba6..e7b69b29183 100644 --- a/lib/models.go +++ b/lib/models.go @@ -29,9 +29,10 @@ import ( "sync" "time" - "github.com/loadimpact/k6/lib/types" "github.com/pkg/errors" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib/types" ) // Separator for group IDs. diff --git a/lib/models_test.go b/lib/models_test.go index 85d020fd3b8..5211bc9eb25 100644 --- a/lib/models_test.go +++ b/lib/models_test.go @@ -26,9 +26,10 @@ import ( "testing" "time" - "github.com/loadimpact/k6/lib/types" "github.com/stretchr/testify/assert" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib/types" ) func TestStageJSON(t *testing.T) { diff --git a/lib/netext/dialer.go b/lib/netext/dialer.go index cecc4e735a7..aed10c4fd27 100644 --- a/lib/netext/dialer.go +++ b/lib/netext/dialer.go @@ -28,11 +28,11 @@ import ( "sync/atomic" "time" + "github.com/viki-org/dnscache" + "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/metrics" "github.com/loadimpact/k6/stats" - - "github.com/viki-org/dnscache" ) // Dialer wraps net.Dialer and provides k6 specific functionality - diff --git a/lib/netext/httpext/compression.go b/lib/netext/httpext/compression.go index 2a7299602ca..d52f07c8a73 100644 --- a/lib/netext/httpext/compression.go +++ b/lib/netext/httpext/compression.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU 
Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package httpext import ( diff --git a/lib/netext/httpext/error_codes.go b/lib/netext/httpext/error_codes.go index b240d6841c2..1d771629d34 100644 --- a/lib/netext/httpext/error_codes.go +++ b/lib/netext/httpext/error_codes.go @@ -30,9 +30,10 @@ import ( "runtime" "syscall" - "github.com/loadimpact/k6/lib/netext" "github.com/pkg/errors" "golang.org/x/net/http2" + + "github.com/loadimpact/k6/lib/netext" ) // TODO: maybe rename the type errorCode, so we can have errCode variables? 
and diff --git a/lib/netext/httpext/error_codes_test.go b/lib/netext/httpext/error_codes_test.go index 76ab824a5a8..50c3d76e99d 100644 --- a/lib/netext/httpext/error_codes_test.go +++ b/lib/netext/httpext/error_codes_test.go @@ -31,10 +31,11 @@ import ( "syscall" "testing" - "github.com/loadimpact/k6/lib/netext" "github.com/pkg/errors" "github.com/stretchr/testify/require" "golang.org/x/net/http2" + + "github.com/loadimpact/k6/lib/netext" ) func TestDefaultError(t *testing.T) { diff --git a/lib/netext/httpext/request.go b/lib/netext/httpext/request.go index 72acd545022..eeace3a0ba4 100644 --- a/lib/netext/httpext/request.go +++ b/lib/netext/httpext/request.go @@ -36,7 +36,7 @@ import ( "github.com/Azure/go-ntlmssp" "github.com/sirupsen/logrus" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/stats" @@ -231,11 +231,10 @@ func MakeRequest(ctx context.Context, preq *ParsedHTTPRequest) (*Response, error } } - tags := state.Options.RunTags.CloneTags() + tags := state.CloneTags() for k, v := range preq.Tags { tags[k] = v } - if state.Options.SystemTags.Has(stats.TagMethod) { tags["method"] = preq.Req.Method } @@ -247,15 +246,6 @@ func MakeRequest(ctx context.Context, preq *ParsedHTTPRequest) (*Response, error if _, ok := tags["name"]; !ok && state.Options.SystemTags.Has(stats.TagName) { tags["name"] = preq.URL.Name } - if state.Options.SystemTags.Has(stats.TagGroup) { - tags["group"] = state.Group.Path - } - if state.Options.SystemTags.Has(stats.TagVU) { - tags["vu"] = strconv.FormatInt(state.Vu, 10) - } - if state.Options.SystemTags.Has(stats.TagIter) { - tags["iter"] = strconv.FormatInt(state.Iteration, 10) - } // Check rate limit *after* we've prepared a request; no need to wait with that part. 
if rpsLimit := state.RPSLimit; rpsLimit != nil { diff --git a/lib/netext/httpext/request_test.go b/lib/netext/httpext/request_test.go index 059bc2fa9cf..a0e11dfb94d 100644 --- a/lib/netext/httpext/request_test.go +++ b/lib/netext/httpext/request_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package httpext import ( @@ -11,12 +31,13 @@ import ( "testing" "time" - "github.com/loadimpact/k6/lib" - "github.com/loadimpact/k6/stats" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/stats" ) type reader func([]byte) (int, error) diff --git a/lib/netext/httpext/response.go b/lib/netext/httpext/response.go index 7aa921bd42f..11189901094 100644 --- a/lib/netext/httpext/response.go +++ b/lib/netext/httpext/response.go @@ -26,9 +26,10 @@ import ( "encoding/json" "fmt" - "github.com/loadimpact/k6/lib/netext" "github.com/pkg/errors" "github.com/tidwall/gjson" + + "github.com/loadimpact/k6/lib/netext" ) // ResponseType is used in the request to specify how the response body should be treated diff --git a/lib/netext/httpext/tracer_test.go b/lib/netext/httpext/tracer_test.go index 75b3d12fbe0..451bc239c31 100644 --- a/lib/netext/httpext/tracer_test.go +++ b/lib/netext/httpext/tracer_test.go @@ -35,13 +35,14 @@ import ( "testing" "time" - "github.com/loadimpact/k6/lib/metrics" - "github.com/loadimpact/k6/lib/netext" - "github.com/loadimpact/k6/stats" "github.com/mccutchen/go-httpbin/httpbin" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/loadimpact/k6/lib/metrics" + "github.com/loadimpact/k6/lib/netext" + "github.com/loadimpact/k6/stats" ) func TestTracer(t *testing.T) { diff --git a/lib/netext/tls.go b/lib/netext/tls.go index e4bd24eb3c1..de92a785fea 100644 --- a/lib/netext/tls.go +++ b/lib/netext/tls.go @@ -1,10 +1,31 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) 
any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package netext import ( "crypto/tls" - "github.com/loadimpact/k6/lib" "golang.org/x/crypto/ocsp" + + "github.com/loadimpact/k6/lib" ) //nolint: golint diff --git a/lib/old_archive_test.go b/lib/old_archive_test.go index 2944ae6dcc1..a64d930190f 100644 --- a/lib/old_archive_test.go +++ b/lib/old_archive_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package lib import ( @@ -9,10 +29,10 @@ import ( "path/filepath" "testing" - "github.com/loadimpact/k6/lib/fsext" - "github.com/loadimpact/k6/lib/scheduler" "github.com/spf13/afero" "github.com/stretchr/testify/require" + + "github.com/loadimpact/k6/lib/fsext" ) func dumpMemMapFsToBuf(fs afero.Fs) (*bytes.Buffer, error) { @@ -210,18 +230,20 @@ func TestFilenamePwdResolve(t *testing.T) { } func TestDerivedExecutionDiscarding(t *testing.T) { - var emptyConfigMap scheduler.ConfigMap + var emptyConfigMap ScenarioConfigs var tests = []struct { metadata string - expExecution interface{} + expScenarios interface{} expError string }{ + // Tests to make sure that "execution" in the options, the old name for + // "scenarios" before #1007 was merged, doesn't mess up the options... { metadata: `{ "filename": "/test.js", "pwd": "/", "options": { "execution": { "something": "invalid" } } }`, - expExecution: emptyConfigMap, + expScenarios: emptyConfigMap, }, { metadata: `{ @@ -229,45 +251,30 @@ func TestDerivedExecutionDiscarding(t *testing.T) { "k6version": "0.24.0", "options": { "execution": { "something": "invalid" } } }`, - expExecution: emptyConfigMap, + expScenarios: emptyConfigMap, }, { metadata: `blah`, expError: "invalid character", }, - { - metadata: `{ - "filename": "/test.js", "pwd": "/", - "k6version": "0.24.0" - }`, - expError: "missing options key", - }, { metadata: `{ "filename": "/test.js", "pwd": "/", "k6version": "0.24.0", "options": "something invalid" }`, - expError: "wrong options type in metadata.json", + expError: "cannot unmarshal string into Go struct field", }, { metadata: `{ "filename": "/test.js", "pwd": "/", "k6version": "0.25.0", - "options": { "execution": { "something": "invalid" } } + "options": { "scenarios": { "something": "invalid" } } }`, expError: "cannot unmarshal string", }, - { - metadata: `{ - "filename": "/test.js", "pwd": "/", - "k6version": "0.25.0", - "options": { "execution": { "default": { "type": "per-vu-iterations" } } 
} - }`, - expExecution: scheduler.ConfigMap{ - DefaultSchedulerName: scheduler.NewPerVUIterationsConfig(DefaultSchedulerName), - }, - }, + // TODO: test an actual scenarios unmarshalling, which is currently + // impossible due to import cycles... } for _, test := range tests { @@ -278,11 +285,11 @@ func TestDerivedExecutionDiscarding(t *testing.T) { arc, err := ReadArchive(buf) if test.expError != "" { - require.Error(t, err) + require.Errorf(t, err, "expected error '%s' but got nil", test.expError) require.Contains(t, err.Error(), test.expError) } else { require.NoError(t, err) - require.Equal(t, test.expExecution, arc.Options.Execution) + require.Equal(t, test.expScenarios, arc.Options.Scenarios) } } } diff --git a/lib/options.go b/lib/options.go index f94341a3ff3..2f51d61a538 100644 --- a/lib/options.go +++ b/lib/options.go @@ -27,17 +27,17 @@ import ( "net" "reflect" - "github.com/loadimpact/k6/lib/scheduler" - "github.com/loadimpact/k6/lib/types" - "github.com/loadimpact/k6/stats" "github.com/pkg/errors" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" ) -// DefaultSchedulerName is used as the default key/ID of the scheduler config entries +// DefaultScenarioName is used as the default key/ID of the scenario config entries // that were created due to the use of the shortcut execution control options (i.e. duration+vus, // iterations+vus, or stages) -const DefaultSchedulerName = "default" +const DefaultScenarioName = "default" // DefaultSummaryTrendStats are the default trend columns shown in the test summary output // nolint: gochecknoglobals @@ -193,18 +193,25 @@ type Options struct { // Initial values for VUs, max VUs, duration cap, iteration cap, and stages. // See the Runner or Executor interfaces for more information. - VUs null.Int `json:"vus" envconfig:"K6_VUS"` - - //TODO: deprecate this? or reuse it in the manual control "scheduler"? 
- VUsMax null.Int `json:"vusMax" envconfig:"K6_VUS_MAX"` + VUs null.Int `json:"vus" envconfig:"K6_VUS"` Duration types.NullDuration `json:"duration" envconfig:"K6_DURATION"` Iterations null.Int `json:"iterations" envconfig:"K6_ITERATIONS"` Stages []Stage `json:"stages" envconfig:"K6_STAGES"` - Execution scheduler.ConfigMap `json:"execution,omitempty" envconfig:"-"` + // TODO: remove the `ignored:"true"` from the field tags, it's there so that + // the envconfig library will ignore those fields. + // + // We should support specifying execution segments via environment + // variables, but we currently can't, because envconfig has this nasty bug + // (among others): https://github.com/kelseyhightower/envconfig/issues/113 + Scenarios ScenarioConfigs `json:"scenarios,omitempty" ignored:"true"` + ExecutionSegment *ExecutionSegment `json:"executionSegment" ignored:"true"` + ExecutionSegmentSequence *ExecutionSegmentSequence `json:"executionSegmentSequence" ignored:"true"` // Timeouts for the setup() and teardown() functions + NoSetup null.Bool `json:"noSetup" envconfig:"NO_SETUP"` SetupTimeout types.NullDuration `json:"setupTimeout" envconfig:"K6_SETUP_TIMEOUT"` + NoTeardown null.Bool `json:"noTeardown" envconfig:"NO_TEARDOWN"` TeardownTimeout types.NullDuration `json:"teardownTimeout" envconfig:"K6_TEARDOWN_TIMEOUT"` // Limit HTTP requests per second. @@ -300,9 +307,6 @@ func (o Options) Apply(opts Options) Options { if opts.VUs.Valid { o.VUs = opts.VUs } - if opts.VUsMax.Valid { - o.VUsMax = opts.VUsMax - } // Specifying duration, iterations, stages, or execution in a "higher" config tier // will overwrite all of the the previous execution settings (if any) from any @@ -310,14 +314,12 @@ func (o Options) Apply(opts Options) Options { // Still, if more than one of those options is simultaneously specified in the same // config tier, they will be preserved, so the validation after we've consolidated // all of the options can return an error. 
- if opts.Duration.Valid || opts.Iterations.Valid || opts.Stages != nil || opts.Execution != nil { - //TODO: uncomment this after we start using the new schedulers - /* - o.Duration = types.NewNullDuration(0, false) - o.Iterations = null.NewInt(0, false) - o.Stages = nil - */ - o.Execution = nil + if opts.Duration.Valid || opts.Iterations.Valid || opts.Stages != nil || opts.Scenarios != nil { + // TODO: emit a warning or a notice log message if overwrite lower tier config options? + o.Duration = types.NewNullDuration(0, false) + o.Iterations = null.NewInt(0, false) + o.Stages = nil + o.Scenarios = nil } if opts.Duration.Valid { @@ -338,12 +340,25 @@ func (o Options) Apply(opts Options) Options { // that happens after the configuration from the different sources is consolidated. It can't // happen here, because something like `K6_ITERATIONS=10 k6 run --vus 5 script.js` wont't // work correctly at this level. - if opts.Execution != nil { - o.Execution = opts.Execution + if opts.Scenarios != nil { + o.Scenarios = opts.Scenarios + } + if opts.ExecutionSegment != nil { + o.ExecutionSegment = opts.ExecutionSegment + } + + if opts.ExecutionSegmentSequence != nil { + o.ExecutionSegmentSequence = opts.ExecutionSegmentSequence + } + if opts.NoSetup.Valid { + o.NoSetup = opts.NoSetup } if opts.SetupTimeout.Valid { o.SetupTimeout = opts.SetupTimeout } + if opts.NoTeardown.Valid { + o.NoTeardown = opts.NoTeardown + } if opts.TeardownTimeout.Valid { o.TeardownTimeout = opts.TeardownTimeout } @@ -434,9 +449,24 @@ func (o Options) Apply(opts Options) Options { // Validate checks if all of the specified options make sense func (o Options) Validate() []error { - //TODO: validate all of the other options... that we should have already been validating... - //TODO: maybe integrate an external validation lib: https://github.com/avelino/awesome-go#validation - return o.Execution.Validate() + // TODO: validate all of the other options... that we should have already been validating... 
+ // TODO: maybe integrate an external validation lib: https://github.com/avelino/awesome-go#validation + var errors []error + if o.ExecutionSegmentSequence != nil { + var segmentFound bool + for _, segment := range *o.ExecutionSegmentSequence { + if o.ExecutionSegment.Equal(segment) { + segmentFound = true + break + } + } + if !segmentFound { + errors = append(errors, + fmt.Errorf("provided segment %s can't be found in sequence %s", + o.ExecutionSegment, o.ExecutionSegmentSequence)) + } + } + return append(errors, o.Scenarios.Validate()...) } // ForEachSpecified enumerates all struct fields and calls the supplied function with each diff --git a/lib/options_test.go b/lib/options_test.go index c7d86928636..1ead264d9a2 100644 --- a/lib/options_test.go +++ b/lib/options_test.go @@ -23,19 +23,20 @@ package lib import ( "crypto/tls" "encoding/json" + "fmt" "net" - "os" "reflect" "testing" "time" "github.com/kelseyhightower/envconfig" - "github.com/loadimpact/k6/lib/scheduler" - "github.com/loadimpact/k6/lib/types" - "github.com/loadimpact/k6/stats" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib/testutils" + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats" ) func TestOptions(t *testing.T) { @@ -49,11 +50,6 @@ func TestOptions(t *testing.T) { assert.True(t, opts.VUs.Valid) assert.Equal(t, int64(12345), opts.VUs.Int64) }) - t.Run("VUsMax", func(t *testing.T) { - opts := Options{}.Apply(Options{VUsMax: null.IntFrom(12345)}) - assert.True(t, opts.VUsMax.Valid) - assert.Equal(t, int64(12345), opts.VUsMax.Int64) - }) t.Run("Duration", func(t *testing.T) { opts := Options{}.Apply(Options{Duration: types.NullDurationFrom(2 * time.Minute)}) assert.True(t, opts.Duration.Valid) @@ -88,17 +84,7 @@ func TestOptions(t *testing.T) { assert.Equal(t, oneStage, opts.Apply(Options{Stages: oneStage}).Stages) assert.Equal(t, oneStage, 
Options{}.Apply(opts).Apply(Options{Stages: oneStage}).Apply(Options{Stages: oneStage}).Stages) }) - t.Run("Execution", func(t *testing.T) { - sched := scheduler.NewConstantLoopingVUsConfig("test") - sched.VUs = null.IntFrom(123) - sched.Duration = types.NullDurationFrom(3 * time.Minute) - opts := Options{}.Apply(Options{Execution: scheduler.ConfigMap{"test": sched}}) - cs, ok := opts.Execution["test"].(scheduler.ConstantLoopingVUsConfig) - assert.True(t, ok) - assert.Equal(t, int64(123), cs.VUs.Int64) - assert.Equal(t, "3m0s", cs.Duration.String()) - }) - //TODO: test that any execution option overwrites any other lower-level options + // Execution overwriting is tested by the config consolidation test in cmd t.Run("RPS", func(t *testing.T) { opts := Options{}.Apply(Options{RPS: null.IntFrom(12345)}) assert.True(t, opts.RPS.Valid) @@ -146,7 +132,6 @@ func TestOptions(t *testing.T) { } t.Run("JSON", func(t *testing.T) { - t.Run("String", func(t *testing.T) { var opts Options jsonStr := `{"tlsCipherSuites":["TLS_ECDHE_RSA_WITH_RC4_128_SHA"]}` @@ -401,7 +386,6 @@ func TestOptions(t *testing.T) { assert.True(t, opts.DiscardResponseBodies.Valid) assert.True(t, opts.DiscardResponseBodies.Bool) }) - } func TestOptionsEnv(t *testing.T) { @@ -415,10 +399,6 @@ func TestOptionsEnv(t *testing.T) { "": null.Int{}, "123": null.IntFrom(123), }, - {"VUsMax", "K6_VUS_MAX"}: { - "": null.Int{}, - "123": null.IntFrom(123), - }, {"Duration", "K6_DURATION"}: { "": types.NullDuration{}, "10s": types.NullDurationFrom(10 * time.Second), @@ -429,8 +409,10 @@ func TestOptionsEnv(t *testing.T) { }, {"Stages", "K6_STAGES"}: { // "": []Stage{}, - "1s": []Stage{{ - Duration: types.NullDurationFrom(1 * time.Second)}, + "1s": []Stage{ + { + Duration: types.NullDurationFrom(1 * time.Second), + }, }, "1s:100": []Stage{ {Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(100)}, @@ -480,11 +462,13 @@ func TestOptionsEnv(t *testing.T) { // External } for field, data := range 
testdata { - os.Clearenv() + field, data := field, data t.Run(field.Name, func(t *testing.T) { for str, val := range data { + str, val := str, val t.Run(`"`+str+`"`, func(t *testing.T) { - assert.NoError(t, os.Setenv(field.Key, str)) + restore := testutils.SetEnv(t, []string{fmt.Sprintf("%s=%s", field.Key, str)}) + defer restore() var opts Options assert.NoError(t, envconfig.Process("k6", &opts)) assert.Equal(t, val, reflect.ValueOf(opts).FieldByName(field.Name).Interface()) @@ -495,8 +479,7 @@ func TestOptionsEnv(t *testing.T) { } func TestCIDRUnmarshal(t *testing.T) { - - var testData = []struct { + testData := []struct { input string expectedOutput *IPNet expactFailure bool diff --git a/lib/runner.go b/lib/runner.go index e3fde944104..b874f3ca1c8 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -26,16 +26,43 @@ import ( "github.com/loadimpact/k6/stats" ) -// Ensure mock implementations conform to the interfaces. -var _ Runner = &MiniRunner{} -var _ VU = &MiniRunnerVU{} - -// A Runner is a factory for VUs. It should precompute as much as possible upon creation (parse -// ASTs, load files into memory, etc.), so that spawning VUs becomes as fast as possible. -// The Runner doesn't actually *do* anything in itself, the Executor is responsible for wrapping -// and scheduling these VUs for execution. +// ActiveVU represents an actively running virtual user. +type ActiveVU interface { + // Run the configured exported function in the VU once. The only + // way to interrupt the execution is to cancel the context given + // to InitializedVU.Activate() + RunOnce() error +} + +// InitializedVU represents a virtual user ready for work. It needs to be +// activated (i.e. given a context) before it can actually be used. Activation +// also requires a callback function, which will be called when the supplied +// context is done. That way, VUs can be returned to a pool and reused. 
+type InitializedVU interface { + // Fully activate the VU so it will be able to run code + Activate(*VUActivationParams) ActiveVU + + // GetID returns the unique VU ID + GetID() int64 +} + +// VUActivationParams are supplied by each executor when it retrieves a VU from +// the buffer pool and activates it for use. +type VUActivationParams struct { + RunContext context.Context + DeactivateCallback func(InitializedVU) + Env, Tags map[string]string + Exec, Scenario string +} + +// A Runner is a factory for VUs. It should precompute as much as possible upon +// creation (parse ASTs, load files into memory, etc.), so that spawning VUs +// becomes as fast as possible. The Runner doesn't actually *do* anything in +// itself, the ExecutionScheduler is responsible for wrapping and scheduling +// these VUs for execution. // -// TODO: Rename this to something more obvious? This name made sense a very long time ago. +// TODO: Rename this to something more obvious? This name made sense a very long +// time ago. type Runner interface { // Creates an Archive of the runner. There should be a corresponding NewFromArchive() function // that will restore the runner from the archive. @@ -44,7 +71,7 @@ type Runner interface { // Spawns a new VU. It's fine to make this function rather heavy, if it means a performance // improvement at runtime. Remember, this is called once per VU and normally only at the start // of a test - RunOnce() may be called hundreds of thousands of times, and must be fast. - NewVU(out chan<- stats.SampleContainer) (VU, error) + NewVU(id int64, out chan<- stats.SampleContainer) (InitializedVU, error) // Runs pre-test setup, if applicable. Setup(ctx context.Context, out chan<- stats.SampleContainer) error @@ -66,98 +93,8 @@ type Runner interface { // values and write it back to the runner. GetOptions() Options SetOptions(opts Options) error -} - -// A VU is a Virtual User, that can be scheduled by an Executor. -type VU interface { - // Runs the VU once. 
The VU is responsible for handling the Halting Problem, eg. making sure - // that execution actually stops when the context is cancelled. - RunOnce(ctx context.Context) error - - // Assign the VU a new ID. Called by the Executor upon creation, but may be called multiple - // times if the VU is recycled because the test was scaled down and then back up. - Reconfigure(id int64) error -} - -// MiniRunner wraps a function in a runner whose VUs will simply call that function. -type MiniRunner struct { - Fn func(ctx context.Context, out chan<- stats.SampleContainer) error - SetupFn func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) - TeardownFn func(ctx context.Context, out chan<- stats.SampleContainer) error - - setupData []byte - - Group *Group - Options Options -} - -func (r MiniRunner) VU(out chan<- stats.SampleContainer) *MiniRunnerVU { - return &MiniRunnerVU{R: r, Out: out} -} - -func (r MiniRunner) MakeArchive() *Archive { - return nil -} - -func (r MiniRunner) NewVU(out chan<- stats.SampleContainer) (VU, error) { - return r.VU(out), nil -} - -func (r *MiniRunner) Setup(ctx context.Context, out chan<- stats.SampleContainer) (err error) { - if fn := r.SetupFn; fn != nil { - r.setupData, err = fn(ctx, out) - } - return -} - -// GetSetupData returns json representation of the setup data if setup() is specified and run, nil otherwise -func (r MiniRunner) GetSetupData() []byte { - return r.setupData -} - -// SetSetupData saves the externally supplied setup data as json in the runner -func (r *MiniRunner) SetSetupData(data []byte) { - r.setupData = data -} - -func (r MiniRunner) Teardown(ctx context.Context, out chan<- stats.SampleContainer) error { - if fn := r.TeardownFn; fn != nil { - return fn(ctx, out) - } - return nil -} - -func (r MiniRunner) GetDefaultGroup() *Group { - if r.Group == nil { - r.Group = &Group{} - } - return r.Group -} - -func (r MiniRunner) GetOptions() Options { - return r.Options -} - -func (r *MiniRunner) 
SetOptions(opts Options) error { - r.Options = opts - return nil -} - -// A VU spawned by a MiniRunner. -type MiniRunnerVU struct { - R MiniRunner - Out chan<- stats.SampleContainer - ID int64 -} - -func (vu MiniRunnerVU) RunOnce(ctx context.Context) error { - if vu.R.Fn == nil { - return nil - } - return vu.R.Fn(ctx, vu.Out) -} -func (vu *MiniRunnerVU) Reconfigure(id int64) error { - vu.ID = id - return nil + // Returns whether the given name is an exported and executable + // function in the script. + IsExecutable(string) bool } diff --git a/lib/runtime_options.go b/lib/runtime_options.go index e1245d9161c..b39d84ff3c6 100644 --- a/lib/runtime_options.go +++ b/lib/runtime_options.go @@ -24,45 +24,46 @@ import ( "fmt" "strings" - "github.com/loadimpact/k6/js/compiler" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" +) + +// CompatibilityMode specifies the JS compatibility mode +// nolint:lll +//go:generate enumer -type=CompatibilityMode -transform=snake -trimprefix CompatibilityMode -output compatibility_mode_gen.go +type CompatibilityMode uint8 + +const ( + // CompatibilityModeExtended achieves ES6+ compatibility with Babel and core.js + CompatibilityModeExtended CompatibilityMode = iota + 1 + // CompatibilityModeBase is standard goja ES5.1+ + CompatibilityModeBase ) // RuntimeOptions are settings passed onto the goja JS runtime type RuntimeOptions struct { // Whether to pass the actual system environment variables to the JS runtime - IncludeSystemEnvVars null.Bool `json:"includeSystemEnvVars" envconfig:"K6_INCLUDE_SYSTEM_ENV_VARS"` + IncludeSystemEnvVars null.Bool `json:"includeSystemEnvVars"` // JS compatibility mode: "extended" (Goja+Babel+core.js) or "base" (plain Goja) + // + // TODO: when we resolve https://github.com/loadimpact/k6/issues/883, we probably + // should use the CompatibilityMode type directly... 
but by then, we'd need to have + // some way of knowing if the value has been set by the user or if we're using the + // default one, so we can handle `k6 run --compatibility-mode=base es6_extended_archive.tar` CompatibilityMode null.String `json:"compatibilityMode"` // Environment variables passed onto the runner - Env map[string]string `json:"env" envconfig:"K6_ENV"` -} - -// Apply overwrites the receiver RuntimeOptions' fields with any that are set -// on the argument struct and returns the receiver -func (o RuntimeOptions) Apply(opts RuntimeOptions) RuntimeOptions { - if opts.IncludeSystemEnvVars.Valid { - o.IncludeSystemEnvVars = opts.IncludeSystemEnvVars - } - if opts.CompatibilityMode.Valid { - o.CompatibilityMode = opts.CompatibilityMode - } - if opts.Env != nil { - o.Env = opts.Env - } - return o + Env map[string]string `json:"env"` } // ValidateCompatibilityMode checks if the provided val is a valid compatibility mode -func ValidateCompatibilityMode(val string) (cm compiler.CompatibilityMode, err error) { +func ValidateCompatibilityMode(val string) (cm CompatibilityMode, err error) { if val == "" { - return compiler.CompatibilityModeExtended, nil + return CompatibilityModeExtended, nil } - if cm, err = compiler.CompatibilityModeString(val); err != nil { + if cm, err = CompatibilityModeString(val); err != nil { var compatValues []string - for _, v := range compiler.CompatibilityModeValues() { + for _, v := range CompatibilityModeValues() { compatValues = append(compatValues, v.String()) } err = fmt.Errorf(`invalid compatibility mode "%s". 
Use: "%s"`, diff --git a/lib/scheduler/base_config.go b/lib/scheduler/base_config.go deleted file mode 100644 index 73a4c22a4b4..00000000000 --- a/lib/scheduler/base_config.go +++ /dev/null @@ -1,103 +0,0 @@ -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * - */ - -package scheduler - -import ( - "fmt" - "time" - - "github.com/loadimpact/k6/lib/types" - null "gopkg.in/guregu/null.v3" -) - -const minPercentage = 0.01 - -// The maximum time k6 will wait after an iteration is supposed to be done -const maxIterationTimeout = 300 * time.Second - -// BaseConfig contains the common config fields for all schedulers -type BaseConfig struct { - Name string `json:"-"` // set via the JS object key - Type string `json:"type"` - StartTime types.NullDuration `json:"startTime"` - Interruptible null.Bool `json:"interruptible"` - IterationTimeout types.NullDuration `json:"iterationTimeout"` - Env map[string]string `json:"env"` - Exec null.String `json:"exec"` // function name, externally validated - Percentage float64 `json:"-"` // 100, unless Split() was called - - //TODO: future extensions like tags, distribution, others? 
-} - -// NewBaseConfig returns a default base config with the default values -func NewBaseConfig(name, configType string, interruptible bool) BaseConfig { - return BaseConfig{ - Name: name, - Type: configType, - Interruptible: null.NewBool(interruptible, false), - IterationTimeout: types.NewNullDuration(30*time.Second, false), - Percentage: 100, - } -} - -// Validate checks some basic things like present name, type, and a positive start time -func (bc BaseConfig) Validate() (errors []error) { - // Some just-in-case checks, since those things are likely checked in other places or - // even assigned by us: - if bc.Name == "" { - errors = append(errors, fmt.Errorf("scheduler name shouldn't be empty")) - } - if bc.Type == "" { - errors = append(errors, fmt.Errorf("missing or empty type field")) - } - if bc.Percentage < minPercentage || bc.Percentage > 100 { - errors = append(errors, fmt.Errorf( - "percentage should be between %f and 100, but is %f", minPercentage, bc.Percentage, - )) - } - if bc.Exec.Valid && bc.Exec.String == "" { - errors = append(errors, fmt.Errorf("exec value cannot be empty")) - } - // The actually reasonable checks: - if bc.StartTime.Duration < 0 { - errors = append(errors, fmt.Errorf("scheduler start time can't be negative")) - } - iterTimeout := time.Duration(bc.IterationTimeout.Duration) - if iterTimeout < 0 || iterTimeout > maxIterationTimeout { - errors = append(errors, fmt.Errorf( - "the iteration timeout should be between 0 and %s, but is %s", maxIterationTimeout, iterTimeout, - )) - } - return errors -} - -// GetBaseConfig just returns itself -func (bc BaseConfig) GetBaseConfig() BaseConfig { - return bc -} - -// CopyWithPercentage is a helper function that just sets the percentage to -// the specified amount. 
-func (bc BaseConfig) CopyWithPercentage(percentage float64) *BaseConfig { - c := bc - c.Percentage = percentage - return &c -} diff --git a/lib/scheduler/configmap.go b/lib/scheduler/configmap.go deleted file mode 100644 index 072f26f81e7..00000000000 --- a/lib/scheduler/configmap.go +++ /dev/null @@ -1,128 +0,0 @@ -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- * - */ - -package scheduler - -import ( - "encoding/json" - "fmt" - "sync" -) - -// ConfigMap can contain mixed scheduler config types -type ConfigMap map[string]Config - -// ConfigConstructor is a simple function that returns a concrete Config instance -// with the specified name and all default values correctly initialized -type ConfigConstructor func(name string, rawJSON []byte) (Config, error) - -//nolint:gochecknoglobals -var ( - configTypesMutex sync.RWMutex - configConstructors = make(map[string]ConfigConstructor) -) - -// RegisterConfigType adds the supplied ConfigConstructor as the constructor for its -// type in the configConstructors map, in a thread-safe manner -func RegisterConfigType(configType string, constructor ConfigConstructor) { - configTypesMutex.Lock() - defer configTypesMutex.Unlock() - - if constructor == nil { - panic("scheduler configs: constructor is nil") - } - if _, configTypeExists := configConstructors[configType]; configTypeExists { - panic("scheduler configs: RegisterConfigType called twice for " + configType) - } - - configConstructors[configType] = constructor -} - -// GetParsedConfig returns a struct instance corresponding to the supplied -// config type. It will be fully initialized - with both the default values of -// the type, as well as with whatever the user had specified in the JSON -func GetParsedConfig(name, configType string, rawJSON []byte) (result Config, err error) { - configTypesMutex.Lock() - defer configTypesMutex.Unlock() - - constructor, exists := configConstructors[configType] - if !exists { - return nil, fmt.Errorf("unknown execution scheduler type '%s'", configType) - } - return constructor(name, rawJSON) -} - -// UnmarshalJSON implements the json.Unmarshaler interface in a two-step manner, -// creating the correct type of configs based on the `type` property. 
-func (scs *ConfigMap) UnmarshalJSON(data []byte) error { - if len(data) == 0 { - return nil - } - - if len(data) == 4 && string(data) == "null" { - return nil - } - - //TODO: use a more sophisticated combination of dec.Token() and dec.More(), - // which would allow us to support both arrays and maps for this config? - var protoConfigs map[string]protoConfig - if err := strictJSONUnmarshal(data, &protoConfigs); err != nil { - return err - } - - result := make(ConfigMap, len(protoConfigs)) - for k, v := range protoConfigs { - if v.Type == "" { - return fmt.Errorf("execution config '%s' doesn't have a type value", k) - } - config, err := GetParsedConfig(k, v.Type, v.rawJSON) - if err != nil { - return err - } - result[k] = config - } - - *scs = result - - return nil -} - -// Validate checks if all of the specified scheduler options make sense -func (scs ConfigMap) Validate() (errors []error) { - for name, scheduler := range scs { - if schedErr := scheduler.Validate(); len(schedErr) != 0 { - errors = append(errors, - fmt.Errorf("scheduler %s has errors: %s", name, concatErrors(schedErr, ", "))) - } - } - return errors -} - -type protoConfig struct { - BaseConfig - rawJSON json.RawMessage -} - -// UnmarshalJSON just reads unmarshals the base config (to get the type), but it also -// stores the unprocessed JSON so we can parse the full config in the next step -func (pc *protoConfig) UnmarshalJSON(b []byte) error { - *pc = protoConfig{BaseConfig{}, b} - return json.Unmarshal(b, &pc.BaseConfig) -} diff --git a/lib/scheduler/constant_arrival_rate.go b/lib/scheduler/constant_arrival_rate.go deleted file mode 100644 index ed53297e924..00000000000 --- a/lib/scheduler/constant_arrival_rate.go +++ /dev/null @@ -1,115 +0,0 @@ -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free 
Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * - */ - -package scheduler - -import ( - "fmt" - "time" - - "github.com/loadimpact/k6/lib/types" - null "gopkg.in/guregu/null.v3" -) - -const constantArrivalRateType = "constant-arrival-rate" - -func init() { - RegisterConfigType(constantArrivalRateType, func(name string, rawJSON []byte) (Config, error) { - config := NewConstantArrivalRateConfig(name) - err := strictJSONUnmarshal(rawJSON, &config) - return config, err - }) -} - -// ConstantArrivalRateConfig stores config for the constant arrival-rate scheduler -type ConstantArrivalRateConfig struct { - BaseConfig - Rate null.Int `json:"rate"` - TimeUnit types.NullDuration `json:"timeUnit"` - Duration types.NullDuration `json:"duration"` - - // Initialize `PreAllocatedVUs` number of VUs, and if more than that are needed, - // they will be dynamically allocated, until `MaxVUs` is reached, which is an - // absolutely hard limit on the number of VUs the scheduler will use - PreAllocatedVUs null.Int `json:"preAllocatedVUs"` - MaxVUs null.Int `json:"maxVUs"` -} - -// NewConstantArrivalRateConfig returns a ConstantArrivalRateConfig with default values -func NewConstantArrivalRateConfig(name string) ConstantArrivalRateConfig { - return ConstantArrivalRateConfig{ - BaseConfig: NewBaseConfig(name, constantArrivalRateType, false), - TimeUnit: types.NewNullDuration(1*time.Second, false), - } -} - -// Make sure we implement the Config interface -var _ Config = &ConstantArrivalRateConfig{} - -// Validate makes sure all options are configured and valid 
-func (carc ConstantArrivalRateConfig) Validate() []error { - errors := carc.BaseConfig.Validate() - if !carc.Rate.Valid { - errors = append(errors, fmt.Errorf("the iteration rate isn't specified")) - } else if carc.Rate.Int64 <= 0 { - errors = append(errors, fmt.Errorf("the iteration rate should be more than 0")) - } - - if time.Duration(carc.TimeUnit.Duration) <= 0 { - errors = append(errors, fmt.Errorf("the timeUnit should be more than 0")) - } - - if !carc.Duration.Valid { - errors = append(errors, fmt.Errorf("the duration is unspecified")) - } else if time.Duration(carc.Duration.Duration) < minDuration { - errors = append(errors, fmt.Errorf( - "the duration should be at least %s, but is %s", minDuration, carc.Duration, - )) - } - - if !carc.PreAllocatedVUs.Valid { - errors = append(errors, fmt.Errorf("the number of preAllocatedVUs isn't specified")) - } else if carc.PreAllocatedVUs.Int64 < 0 { - errors = append(errors, fmt.Errorf("the number of preAllocatedVUs shouldn't be negative")) - } - - if !carc.MaxVUs.Valid { - errors = append(errors, fmt.Errorf("the number of maxVUs isn't specified")) - } else if carc.MaxVUs.Int64 < carc.PreAllocatedVUs.Int64 { - errors = append(errors, fmt.Errorf("maxVUs shouldn't be less than preAllocatedVUs")) - } - - return errors -} - -// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs -func (carc ConstantArrivalRateConfig) GetMaxVUs() int64 { - return carc.MaxVUs.Int64 -} - -// GetMaxDuration returns the maximum duration time for this scheduler, including -// the specified iterationTimeout, if the iterations are uninterruptible -func (carc ConstantArrivalRateConfig) GetMaxDuration() time.Duration { - maxDuration := carc.Duration.Duration - if !carc.Interruptible.Bool { - maxDuration += carc.IterationTimeout.Duration - } - return time.Duration(maxDuration) -} diff --git a/lib/scheduler/constant_looping_vus.go b/lib/scheduler/constant_looping_vus.go deleted file mode 100644 index 
a6293cb1b17..00000000000 --- a/lib/scheduler/constant_looping_vus.go +++ /dev/null @@ -1,113 +0,0 @@ -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * - */ - -package scheduler - -import ( - "fmt" - "time" - - "github.com/loadimpact/k6/lib/types" - null "gopkg.in/guregu/null.v3" -) - -const constantLoopingVUsType = "constant-looping-vus" - -func init() { - RegisterConfigType(constantLoopingVUsType, func(name string, rawJSON []byte) (Config, error) { - config := NewConstantLoopingVUsConfig(name) - err := strictJSONUnmarshal(rawJSON, &config) - return config, err - }) -} - -// The minimum duration we'll allow users to schedule. 
This doesn't affect the stages -// configuration, where 0-duration virtual stages are allowed for instantaneous VU jumps -const minDuration = 1 * time.Second - -// ConstantLoopingVUsConfig stores VUs and duration -type ConstantLoopingVUsConfig struct { - BaseConfig - VUs null.Int `json:"vus"` - Duration types.NullDuration `json:"duration"` -} - -// NewConstantLoopingVUsConfig returns a ConstantLoopingVUsConfig with default values -func NewConstantLoopingVUsConfig(name string) ConstantLoopingVUsConfig { - return ConstantLoopingVUsConfig{ - BaseConfig: NewBaseConfig(name, constantLoopingVUsType, false), - VUs: null.NewInt(1, false), - } -} - -// Make sure we implement the Config interface -var _ Config = &ConstantLoopingVUsConfig{} - -// Validate makes sure all options are configured and valid -func (lcv ConstantLoopingVUsConfig) Validate() []error { - errors := lcv.BaseConfig.Validate() - if lcv.VUs.Int64 <= 0 { - errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) - } - - if !lcv.Duration.Valid { - errors = append(errors, fmt.Errorf("the duration is unspecified")) - } else if time.Duration(lcv.Duration.Duration) < minDuration { - errors = append(errors, fmt.Errorf( - "the duration should be at least %s, but is %s", minDuration, lcv.Duration, - )) - } - - return errors -} - -// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs -func (lcv ConstantLoopingVUsConfig) GetMaxVUs() int64 { - return lcv.VUs.Int64 -} - -// GetMaxDuration returns the maximum duration time for this scheduler, including -// the specified iterationTimeout, if the iterations are uninterruptible -func (lcv ConstantLoopingVUsConfig) GetMaxDuration() time.Duration { - maxDuration := lcv.Duration.Duration - if !lcv.Interruptible.Bool { - maxDuration += lcv.IterationTimeout.Duration - } - return time.Duration(maxDuration) -} - -// Split divides the VUS as best it can, but keeps the same duration -func (lcv ConstantLoopingVUsConfig) 
Split(percentages []float64) ([]Config, error) { - if err := checkPercentagesSum(percentages); err != nil { - return nil, err - } - configs := make([]Config, len(percentages)) - for i, p := range percentages { - //TODO: figure out a better approach for the proportional distribution - // of the VUs (which are indivisible items)... - // Some sort of "pick closest match to percentage and adjust remaining"? - configs[i] = &ConstantLoopingVUsConfig{ - BaseConfig: *lcv.BaseConfig.CopyWithPercentage(p), - VUs: null.IntFrom(int64(float64(lcv.VUs.Int64) / p)), - Duration: lcv.Duration, - } - } - return configs, nil -} diff --git a/lib/scheduler/helpers.go b/lib/scheduler/helpers.go deleted file mode 100644 index 31bf37bf22a..00000000000 --- a/lib/scheduler/helpers.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- * - */ - -package scheduler - -import ( - "bytes" - "encoding/json" - "fmt" - "math" - "strings" -) - -// A helper function to verify percentage distributions -func checkPercentagesSum(percentages []float64) error { - var sum float64 - for _, v := range percentages { - sum += v - } - if math.Abs(100-sum) >= minPercentage { - return fmt.Errorf("split percentage sum is %.2f while it should be 100", sum) - } - return nil -} - -// A helper function for joining error messages into a single string -func concatErrors(errors []error, separator string) string { - errStrings := make([]string, len(errors)) - for i, e := range errors { - errStrings[i] = e.Error() - } - return strings.Join(errStrings, separator) -} - -// Decode a JSON in a strict manner, emitting an error if there are unknown fields -func strictJSONUnmarshal(data []byte, v interface{}) error { - dec := json.NewDecoder(bytes.NewReader(data)) - dec.DisallowUnknownFields() - dec.UseNumber() - - if err := dec.Decode(&v); err != nil { - return err - } - if dec.More() { - return fmt.Errorf("unexpected data after the JSON object") - } - return nil -} - -// A helper function to avoid code duplication -func validateStages(stages []Stage) []error { - var errors []error - if len(stages) == 0 { - errors = append(errors, fmt.Errorf("at least one stage has to be specified")) - } else { - for i, s := range stages { - stageNum := i + 1 - if !s.Duration.Valid { - errors = append(errors, fmt.Errorf("stage %d doesn't have a duration", stageNum)) - } else if s.Duration.Duration < 0 { - errors = append(errors, fmt.Errorf("the duration for stage %d shouldn't be negative", stageNum)) - } - if !s.Target.Valid { - errors = append(errors, fmt.Errorf("stage %d doesn't have a target", stageNum)) - } else if s.Target.Int64 < 0 { - errors = append(errors, fmt.Errorf("the target for stage %d shouldn't be negative", stageNum)) - } - } - } - return errors -} diff --git a/lib/scheduler/per_vu_iterations.go b/lib/scheduler/per_vu_iterations.go 
deleted file mode 100644 index 25501411ea4..00000000000 --- a/lib/scheduler/per_vu_iterations.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * - */ - -package scheduler - -import ( - "fmt" - "time" - - "github.com/loadimpact/k6/lib/types" - null "gopkg.in/guregu/null.v3" -) - -const perVUIterationsType = "per-vu-iterations" - -func init() { - RegisterConfigType(perVUIterationsType, func(name string, rawJSON []byte) (Config, error) { - config := NewPerVUIterationsConfig(name) - err := strictJSONUnmarshal(rawJSON, &config) - return config, err - }) -} - -// PerVUIteationsConfig stores the number of VUs iterations, as well as maxDuration settings -type PerVUIteationsConfig struct { - BaseConfig - VUs null.Int `json:"vus"` - Iterations null.Int `json:"iterations"` - MaxDuration types.NullDuration `json:"maxDuration"` -} - -// NewPerVUIterationsConfig returns a PerVUIteationsConfig with default values -func NewPerVUIterationsConfig(name string) PerVUIteationsConfig { - return PerVUIteationsConfig{ - BaseConfig: NewBaseConfig(name, perVUIterationsType, false), - VUs: null.NewInt(1, false), - Iterations: null.NewInt(1, false), - MaxDuration: types.NewNullDuration(1*time.Hour, false), - } -} - -// Make sure we implement the Config interface -var _ Config = 
&PerVUIteationsConfig{} - -// Validate makes sure all options are configured and valid -func (pvic PerVUIteationsConfig) Validate() []error { - errors := pvic.BaseConfig.Validate() - if pvic.VUs.Int64 <= 0 { - errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) - } - - if pvic.Iterations.Int64 <= 0 { - errors = append(errors, fmt.Errorf("the number of iterations should be more than 0")) - } - - if time.Duration(pvic.MaxDuration.Duration) < minDuration { - errors = append(errors, fmt.Errorf( - "the maxDuration should be at least %s, but is %s", minDuration, pvic.MaxDuration, - )) - } - - return errors -} - -// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs -func (pvic PerVUIteationsConfig) GetMaxVUs() int64 { - return pvic.VUs.Int64 -} - -// GetMaxDuration returns the maximum duration time for this scheduler, including -// the specified iterationTimeout, if the iterations are uninterruptible -func (pvic PerVUIteationsConfig) GetMaxDuration() time.Duration { - maxDuration := pvic.MaxDuration.Duration - if !pvic.Interruptible.Bool { - maxDuration += pvic.IterationTimeout.Duration - } - return time.Duration(maxDuration) -} diff --git a/lib/scheduler/schedulers_test.go b/lib/scheduler/schedulers_test.go deleted file mode 100644 index 11c07453f64..00000000000 --- a/lib/scheduler/schedulers_test.go +++ /dev/null @@ -1,239 +0,0 @@ -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * - */ - -package scheduler - -import ( - "encoding/json" - "fmt" - "testing" - "time" - - "github.com/loadimpact/k6/lib/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" -) - -type configMapTestCase struct { - rawJSON string - expectParseError bool - expectValidationError bool - customValidator func(t *testing.T, cm ConfigMap) -} - -//nolint:lll,gochecknoglobals -var configMapTestCases = []configMapTestCase{ - {"", true, false, nil}, - {"1234", true, false, nil}, - {"asdf", true, false, nil}, - {"'adsf'", true, false, nil}, - {"[]", true, false, nil}, - {"{}", false, false, func(t *testing.T, cm ConfigMap) { - assert.Equal(t, cm, ConfigMap{}) - }}, - {"{}asdf", true, false, nil}, - {"null", false, false, func(t *testing.T, cm ConfigMap) { - assert.Nil(t, cm) - }}, - {`{"someKey": {}}`, true, false, nil}, - {`{"someKey": {"type": "constant-blah-blah", "vus": 10, "duration": "60s"}}`, true, false, nil}, - {`{"someKey": {"type": "constant-looping-vus", "uknownField": "should_error"}}`, true, false, nil}, - {`{"someKey": {"type": "constant-looping-vus", "vus": 10, "duration": "60s", "env": 123}}`, true, false, nil}, - - // Validation errors for constant-looping-vus and the base config - {`{"someKey": {"type": "constant-looping-vus", "vus": 10, "duration": "60s", "interruptible": false, - "iterationTimeout": "10s", "startTime": "70s", "env": {"test": "mest"}, "exec": "someFunc"}}`, - false, false, func(t *testing.T, cm ConfigMap) { - sched := NewConstantLoopingVUsConfig("someKey") - sched.VUs = null.IntFrom(10) - sched.Duration = types.NullDurationFrom(1 * time.Minute) - sched.Interruptible = null.BoolFrom(false) - sched.IterationTimeout = types.NullDurationFrom(10 * time.Second) - sched.StartTime = 
types.NullDurationFrom(70 * time.Second) - sched.Exec = null.StringFrom("someFunc") - sched.Env = map[string]string{"test": "mest"} - require.Equal(t, cm, ConfigMap{"someKey": sched}) - require.Equal(t, sched.BaseConfig, cm["someKey"].GetBaseConfig()) - assert.Equal(t, 70*time.Second, cm["someKey"].GetMaxDuration()) - assert.Equal(t, int64(10), cm["someKey"].GetMaxVUs()) - assert.Empty(t, cm["someKey"].Validate()) - }}, - {`{"aname": {"type": "constant-looping-vus", "duration": "60s"}}`, false, false, nil}, - {`{"": {"type": "constant-looping-vus", "vus": 10, "duration": "60s"}}`, false, true, nil}, - {`{"aname": {"type": "constant-looping-vus"}}`, false, true, nil}, - {`{"aname": {"type": "constant-looping-vus", "vus": 0.5}}`, true, false, nil}, - {`{"aname": {"type": "constant-looping-vus", "vus": 10}}`, false, true, nil}, - {`{"aname": {"type": "constant-looping-vus", "vus": 0, "duration": "60s"}}`, false, true, nil}, - {`{"aname": {"type": "constant-looping-vus", "vus": -1, "duration": "60s"}}`, false, true, nil}, - {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "0s"}}`, false, true, nil}, - {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "10s", "startTime": "-10s"}}`, false, true, nil}, - {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "10s", "exec": ""}}`, false, true, nil}, - {`{"aname": {"type": "constant-looping-vus", "vus": 10, "duration": "10s", "iterationTimeout": "-2s"}}`, false, true, nil}, - - // variable-looping-vus - {`{"varloops": {"type": "variable-looping-vus", "startVUs": 20, "iterationTimeout": "15s", - "stages": [{"duration": "60s", "target": 30}, {"duration": "120s", "target": 10}]}}`, - false, false, func(t *testing.T, cm ConfigMap) { - sched := NewVariableLoopingVUsConfig("varloops") - sched.IterationTimeout = types.NullDurationFrom(15 * time.Second) - sched.StartVUs = null.IntFrom(20) - sched.Stages = []Stage{ - {Target: null.IntFrom(30), Duration: types.NullDurationFrom(60 * 
time.Second)}, - {Target: null.IntFrom(10), Duration: types.NullDurationFrom(120 * time.Second)}, - } - require.Equal(t, cm, ConfigMap{"varloops": sched}) - assert.Equal(t, int64(30), cm["varloops"].GetMaxVUs()) - assert.Equal(t, 195*time.Second, cm["varloops"].GetMaxDuration()) - assert.Empty(t, cm["varloops"].Validate()) - }}, - {`{"varloops": {"type": "variable-looping-vus", "startVUs": 0, "stages": [{"duration": "60s", "target": 0}]}}`, false, false, nil}, - {`{"varloops": {"type": "variable-looping-vus", "startVUs": -1, "stages": [{"duration": "60s", "target": 30}]}}`, false, true, nil}, - {`{"varloops": {"type": "variable-looping-vus", "startVUs": 2, "stages": [{"duration": "-60s", "target": 30}]}}`, false, true, nil}, - {`{"varloops": {"type": "variable-looping-vus", "startVUs": 2, "stages": [{"duration": "60s", "target": -30}]}}`, false, true, nil}, - {`{"varloops": {"type": "variable-looping-vus", "stages": [{"duration": "60s"}]}}`, false, true, nil}, - {`{"varloops": {"type": "variable-looping-vus", "stages": [{"target": 30}]}}`, false, true, nil}, - {`{"varloops": {"type": "variable-looping-vus", "stages": []}}`, false, true, nil}, - {`{"varloops": {"type": "variable-looping-vus"}}`, false, true, nil}, - - // shared-iterations - {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10}}`, - false, false, func(t *testing.T, cm ConfigMap) { - sched := NewSharedIterationsConfig("ishared") - sched.Iterations = null.IntFrom(20) - sched.VUs = null.IntFrom(10) - require.Equal(t, cm, ConfigMap{"ishared": sched}) - assert.Equal(t, int64(10), cm["ishared"].GetMaxVUs()) - assert.Equal(t, 3630*time.Second, cm["ishared"].GetMaxDuration()) - assert.Empty(t, cm["ishared"].Validate()) - }}, - {`{"ishared": {"type": "shared-iterations"}}`, false, false, nil}, // Has 1 VU & 1 iter default values - {`{"ishared": {"type": "shared-iterations", "iterations": 20}}`, false, false, nil}, - {`{"ishared": {"type": "shared-iterations", "vus": 10}}`, false, true, 
nil}, // error because VUs are more than iters - {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "30m"}}`, false, false, nil}, - {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "-3m"}}`, false, true, nil}, - {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 10, "maxDuration": "0s"}}`, false, true, nil}, - {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": -10}}`, false, true, nil}, - {`{"ishared": {"type": "shared-iterations", "iterations": -1, "vus": 1}}`, false, true, nil}, - {`{"ishared": {"type": "shared-iterations", "iterations": 20, "vus": 30}}`, false, true, nil}, - - // per-vu-iterations - {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10}}`, - false, false, func(t *testing.T, cm ConfigMap) { - sched := NewPerVUIterationsConfig("ipervu") - sched.Iterations = null.IntFrom(20) - sched.VUs = null.IntFrom(10) - require.Equal(t, cm, ConfigMap{"ipervu": sched}) - assert.Equal(t, int64(10), cm["ipervu"].GetMaxVUs()) - assert.Equal(t, 3630*time.Second, cm["ipervu"].GetMaxDuration()) - assert.Empty(t, cm["ipervu"].Validate()) - }}, - {`{"ipervu": {"type": "per-vu-iterations"}}`, false, false, nil}, // Has 1 VU & 1 iter default values - {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20}}`, false, false, nil}, - {`{"ipervu": {"type": "per-vu-iterations", "vus": 10}}`, false, false, nil}, - {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10}}`, false, false, nil}, - {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10, "maxDuration": "-3m"}}`, false, true, nil}, - {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": 10, "maxDuration": "0s"}}`, false, true, nil}, - {`{"ipervu": {"type": "per-vu-iterations", "iterations": 20, "vus": -10}}`, false, true, nil}, - {`{"ipervu": {"type": "per-vu-iterations", "iterations": -1, "vus": 1}}`, false, true, nil}, - - // 
constant-arrival-rate - {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "timeUnit": "1m", "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, - false, false, func(t *testing.T, cm ConfigMap) { - sched := NewConstantArrivalRateConfig("carrival") - sched.Rate = null.IntFrom(10) - sched.Duration = types.NullDurationFrom(10 * time.Minute) - sched.TimeUnit = types.NullDurationFrom(1 * time.Minute) - sched.PreAllocatedVUs = null.IntFrom(20) - sched.MaxVUs = null.IntFrom(30) - require.Equal(t, cm, ConfigMap{"carrival": sched}) - assert.Equal(t, int64(30), cm["carrival"].GetMaxVUs()) - assert.Equal(t, 630*time.Second, cm["carrival"].GetMaxDuration()) - assert.Empty(t, cm["carrival"].Validate()) - }}, - {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, false, false, nil}, - {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30, "timeUnit": "-1s"}}`, false, true, nil}, - {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20}}`, false, true, nil}, - {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "maxVUs": 30}}`, false, true, nil}, - {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "preAllocatedVUs": 20, "maxVUs": 30}}`, false, true, nil}, - {`{"carrival": {"type": "constant-arrival-rate", "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, false, true, nil}, - {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "0m", "preAllocatedVUs": 20, "maxVUs": 30}}`, false, true, nil}, - {`{"carrival": {"type": "constant-arrival-rate", "rate": 0, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 30}}`, false, true, nil}, - {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": 20, "maxVUs": 15}}`, false, true, nil}, - {`{"carrival": {"type": "constant-arrival-rate", "rate": 
10, "duration": "0s", "preAllocatedVUs": 20, "maxVUs": 25}}`, false, true, nil}, - {`{"carrival": {"type": "constant-arrival-rate", "rate": 10, "duration": "10m", "preAllocatedVUs": -2, "maxVUs": 25}}`, false, true, nil}, - - // variable-arrival-rate - {`{"varrival": {"type": "variable-arrival-rate", "startRate": 10, "timeUnit": "30s", "preAllocatedVUs": 20, "maxVUs": 50, - "stages": [{"duration": "3m", "target": 30}, {"duration": "5m", "target": 10}]}}`, - false, false, func(t *testing.T, cm ConfigMap) { - sched := NewVariableArrivalRateConfig("varrival") - sched.StartRate = null.IntFrom(10) - sched.Stages = []Stage{ - {Target: null.IntFrom(30), Duration: types.NullDurationFrom(180 * time.Second)}, - {Target: null.IntFrom(10), Duration: types.NullDurationFrom(300 * time.Second)}, - } - sched.TimeUnit = types.NullDurationFrom(30 * time.Second) - sched.PreAllocatedVUs = null.IntFrom(20) - sched.MaxVUs = null.IntFrom(50) - require.Equal(t, cm, ConfigMap{"varrival": sched}) - assert.Equal(t, int64(50), cm["varrival"].GetMaxVUs()) - assert.Equal(t, 510*time.Second, cm["varrival"].GetMaxDuration()) - assert.Empty(t, cm["varrival"].Validate()) - }}, - {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, false, false, nil}, - {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": -20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, - {`{"varrival": {"type": "variable-arrival-rate", "startRate": -1, "preAllocatedVUs": 20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, - {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, - {`{"varrival": {"type": "variable-arrival-rate", "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, - {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, 
"maxVUs": 50}}`, false, true, nil}, - {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50, "stages": []}}`, false, true, nil}, - {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 20, "maxVUs": 50, "stages": [{"duration": "5m", "target": 10}], "timeUnit": "-1s"}}`, false, true, nil}, - {`{"varrival": {"type": "variable-arrival-rate", "preAllocatedVUs": 30, "maxVUs": 20, "stages": [{"duration": "5m", "target": 10}]}}`, false, true, nil}, -} - -func TestConfigMapParsingAndValidation(t *testing.T) { - t.Parallel() - for i, tc := range configMapTestCases { - tc := tc - t.Run(fmt.Sprintf("TestCase#%d", i), func(t *testing.T) { - t.Logf(tc.rawJSON) - var result ConfigMap - err := json.Unmarshal([]byte(tc.rawJSON), &result) - if tc.expectParseError { - require.Error(t, err) - return - } - require.NoError(t, err) - - validationErrors := result.Validate() - if tc.expectValidationError { - assert.NotEmpty(t, validationErrors) - } else { - assert.Empty(t, validationErrors) - } - if tc.customValidator != nil { - tc.customValidator(t, result) - } - }) - } -} - -//TODO: check percentage split calculations diff --git a/lib/scheduler/shared_iterations.go b/lib/scheduler/shared_iterations.go deleted file mode 100644 index d6ef2e2bb92..00000000000 --- a/lib/scheduler/shared_iterations.go +++ /dev/null @@ -1,98 +0,0 @@ -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. 
- * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * - */ - -package scheduler - -import ( - "fmt" - "time" - - "github.com/loadimpact/k6/lib/types" - null "gopkg.in/guregu/null.v3" -) - -const sharedIterationsType = "shared-iterations" - -func init() { - RegisterConfigType(sharedIterationsType, func(name string, rawJSON []byte) (Config, error) { - config := NewSharedIterationsConfig(name) - err := strictJSONUnmarshal(rawJSON, &config) - return config, err - }) -} - -// SharedIteationsConfig stores the number of VUs iterations, as well as maxDuration settings -type SharedIteationsConfig struct { - BaseConfig - VUs null.Int `json:"vus"` - Iterations null.Int `json:"iterations"` - MaxDuration types.NullDuration `json:"maxDuration"` -} - -// NewSharedIterationsConfig returns a SharedIteationsConfig with default values -func NewSharedIterationsConfig(name string) SharedIteationsConfig { - return SharedIteationsConfig{ - BaseConfig: NewBaseConfig(name, sharedIterationsType, false), - VUs: null.NewInt(1, false), - Iterations: null.NewInt(1, false), - MaxDuration: types.NewNullDuration(1*time.Hour, false), - } -} - -// Make sure we implement the Config interface -var _ Config = &SharedIteationsConfig{} - -// Validate makes sure all options are configured and valid -func (sic SharedIteationsConfig) Validate() []error { - errors := sic.BaseConfig.Validate() - if sic.VUs.Int64 <= 0 { - errors = append(errors, fmt.Errorf("the number of VUs should be more than 0")) - } - - if sic.Iterations.Int64 < sic.VUs.Int64 { - errors = append(errors, fmt.Errorf( - "the number of iterations (%d) shouldn't be less than the number of VUs (%d)", - sic.Iterations.Int64, sic.VUs.Int64, - )) - } - - if time.Duration(sic.MaxDuration.Duration) < minDuration { - errors = append(errors, fmt.Errorf( - "the maxDuration should be at least %s, but is %s", minDuration, sic.MaxDuration, - )) - } - - return errors -} - -// 
GetMaxVUs returns the absolute maximum number of possible concurrently running VUs -func (sic SharedIteationsConfig) GetMaxVUs() int64 { - return sic.VUs.Int64 -} - -// GetMaxDuration returns the maximum duration time for this scheduler, including -// the specified iterationTimeout, if the iterations are uninterruptible -func (sic SharedIteationsConfig) GetMaxDuration() time.Duration { - maxDuration := sic.MaxDuration.Duration - if !sic.Interruptible.Bool { - maxDuration += sic.IterationTimeout.Duration - } - return time.Duration(maxDuration) -} diff --git a/lib/scheduler/variable_arrival_rate.go b/lib/scheduler/variable_arrival_rate.go deleted file mode 100644 index 76ba7a51dcb..00000000000 --- a/lib/scheduler/variable_arrival_rate.go +++ /dev/null @@ -1,111 +0,0 @@ -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- * - */ - -package scheduler - -import ( - "fmt" - "time" - - "github.com/loadimpact/k6/lib/types" - null "gopkg.in/guregu/null.v3" -) - -const variableArrivalRateType = "variable-arrival-rate" - -func init() { - RegisterConfigType(variableArrivalRateType, func(name string, rawJSON []byte) (Config, error) { - config := NewVariableArrivalRateConfig(name) - err := strictJSONUnmarshal(rawJSON, &config) - return config, err - }) -} - -// VariableArrivalRateConfig stores config for the variable arrival-rate scheduler -type VariableArrivalRateConfig struct { - BaseConfig - StartRate null.Int `json:"startRate"` - TimeUnit types.NullDuration `json:"timeUnit"` - Stages []Stage `json:"stages"` - - // Initialize `PreAllocatedVUs` number of VUs, and if more than that are needed, - // they will be dynamically allocated, until `MaxVUs` is reached, which is an - // absolutely hard limit on the number of VUs the scheduler will use - PreAllocatedVUs null.Int `json:"preAllocatedVUs"` - MaxVUs null.Int `json:"maxVUs"` -} - -// NewVariableArrivalRateConfig returns a VariableArrivalRateConfig with default values -func NewVariableArrivalRateConfig(name string) VariableArrivalRateConfig { - return VariableArrivalRateConfig{ - BaseConfig: NewBaseConfig(name, variableArrivalRateType, false), - TimeUnit: types.NewNullDuration(1*time.Second, false), - } -} - -// Make sure we implement the Config interface -var _ Config = &VariableArrivalRateConfig{} - -// Validate makes sure all options are configured and valid -func (varc VariableArrivalRateConfig) Validate() []error { - errors := varc.BaseConfig.Validate() - - if varc.StartRate.Int64 < 0 { - errors = append(errors, fmt.Errorf("the startRate value shouldn't be negative")) - } - - if time.Duration(varc.TimeUnit.Duration) < 0 { - errors = append(errors, fmt.Errorf("the timeUnit should be more than 0")) - } - - errors = append(errors, validateStages(varc.Stages)...) 
- - if !varc.PreAllocatedVUs.Valid { - errors = append(errors, fmt.Errorf("the number of preAllocatedVUs isn't specified")) - } else if varc.PreAllocatedVUs.Int64 < 0 { - errors = append(errors, fmt.Errorf("the number of preAllocatedVUs shouldn't be negative")) - } - - if !varc.MaxVUs.Valid { - errors = append(errors, fmt.Errorf("the number of maxVUs isn't specified")) - } else if varc.MaxVUs.Int64 < varc.PreAllocatedVUs.Int64 { - errors = append(errors, fmt.Errorf("maxVUs shouldn't be less than preAllocatedVUs")) - } - - return errors -} - -// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs -func (varc VariableArrivalRateConfig) GetMaxVUs() int64 { - return varc.MaxVUs.Int64 -} - -// GetMaxDuration returns the maximum duration time for this scheduler, including -// the specified iterationTimeout, if the iterations are uninterruptible -func (varc VariableArrivalRateConfig) GetMaxDuration() time.Duration { - var maxDuration types.Duration - for _, s := range varc.Stages { - maxDuration += s.Duration.Duration - } - if !varc.Interruptible.Bool { - maxDuration += varc.IterationTimeout.Duration - } - return time.Duration(maxDuration) -} diff --git a/lib/scheduler/variable_looping_vus.go b/lib/scheduler/variable_looping_vus.go deleted file mode 100644 index f1ab3de1769..00000000000 --- a/lib/scheduler/variable_looping_vus.go +++ /dev/null @@ -1,94 +0,0 @@ -/* - * - * k6 - a next-generation load testing tool - * Copyright (C) 2019 Load Impact - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - * - */ - -package scheduler - -import ( - "fmt" - "time" - - "github.com/loadimpact/k6/lib/types" - null "gopkg.in/guregu/null.v3" -) - -const variableLoopingVUsType = "variable-looping-vus" - -func init() { - RegisterConfigType(variableLoopingVUsType, func(name string, rawJSON []byte) (Config, error) { - config := NewVariableLoopingVUsConfig(name) - err := strictJSONUnmarshal(rawJSON, &config) - return config, err - }) -} - -// Stage contains -type Stage struct { - Duration types.NullDuration `json:"duration"` - Target null.Int `json:"target"` // TODO: maybe rename this to endVUs? something else? -} - -// VariableLoopingVUsConfig stores the configuration for the stages scheduler -type VariableLoopingVUsConfig struct { - BaseConfig - StartVUs null.Int `json:"startVUs"` - Stages []Stage `json:"stages"` -} - -// NewVariableLoopingVUsConfig returns a VariableLoopingVUsConfig with its default values -func NewVariableLoopingVUsConfig(name string) VariableLoopingVUsConfig { - return VariableLoopingVUsConfig{BaseConfig: NewBaseConfig(name, variableLoopingVUsType, false)} -} - -// Make sure we implement the Config interface -var _ Config = &VariableLoopingVUsConfig{} - -// Validate makes sure all options are configured and valid -func (vlvc VariableLoopingVUsConfig) Validate() []error { - errors := vlvc.BaseConfig.Validate() - if vlvc.StartVUs.Int64 < 0 { - errors = append(errors, fmt.Errorf("the number of start VUs shouldn't be negative")) - } - - return append(errors, validateStages(vlvc.Stages)...) 
-} - -// GetMaxVUs returns the absolute maximum number of possible concurrently running VUs -func (vlvc VariableLoopingVUsConfig) GetMaxVUs() int64 { - maxVUs := vlvc.StartVUs.Int64 - for _, s := range vlvc.Stages { - if s.Target.Int64 > maxVUs { - maxVUs = s.Target.Int64 - } - } - return maxVUs -} - -// GetMaxDuration returns the maximum duration time for this scheduler, including -// the specified iterationTimeout, if the iterations are uninterruptible -func (vlvc VariableLoopingVUsConfig) GetMaxDuration() time.Duration { - var maxDuration types.Duration - for _, s := range vlvc.Stages { - maxDuration += s.Duration.Duration - } - if !vlvc.Interruptible.Bool { - maxDuration += vlvc.IterationTimeout.Duration - } - return time.Duration(maxDuration) -} diff --git a/lib/state.go b/lib/state.go index 08df1ee05f9..464604d3253 100644 --- a/lib/state.go +++ b/lib/state.go @@ -67,4 +67,14 @@ type State struct { BPool *bpool.BufferPool Vu, Iteration int64 + Tags map[string]string +} + +// CloneTags makes a copy of the tags map and returns it. +func (s *State) CloneTags() map[string]string { + tags := make(map[string]string, len(s.Tags)) + for k, v := range s.Tags { + tags[k] = v + } + return tags } diff --git a/lib/testutils/env.go b/lib/testutils/env.go new file mode 100644 index 00000000000..6ad78d9f848 --- /dev/null +++ b/lib/testutils/env.go @@ -0,0 +1,62 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + +package testutils + +import ( + "os" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// SetEnv is a helper funcion for setting arbitrary environment variables and +// restoring the old ones at the end, usually by deferring the returned callback +// TODO: remove these hacks when we improve the configuration (hopefully +// completely, see https://github.com/loadimpact/k6/issues/883)... we shouldn't +// have to mess with the global environment at all... +func SetEnv(t *testing.T, newEnv []string) (restoreEnv func()) { + actuallSetEnv := func(env []string, abortOnSetErr bool) { + os.Clearenv() + for _, e := range env { + val := "" + pair := strings.SplitN(e, "=", 2) + if len(pair) > 1 { + val = pair[1] + } + err := os.Setenv(pair[0], val) + if abortOnSetErr { + require.NoError(t, err) + } else if err != nil { + t.Logf( + "Received a non-aborting but unexpected error '%s' when setting env.var '%s' to '%s'", + err, pair[0], val, + ) + } + } + } + oldEnv := os.Environ() + actuallSetEnv(newEnv, true) + + return func() { + actuallSetEnv(oldEnv, false) + } +} diff --git a/lib/testutils/httpmultibin/httpmultibin.go b/lib/testutils/httpmultibin/httpmultibin.go index e905997cdd4..3feb268bea2 100644 --- a/lib/testutils/httpmultibin/httpmultibin.go +++ b/lib/testutils/httpmultibin/httpmultibin.go @@ -40,13 +40,14 @@ import ( "github.com/andybalholm/brotli" "github.com/gorilla/websocket" "github.com/klauspost/compress/zstd" - "github.com/loadimpact/k6/lib/netext" - "github.com/loadimpact/k6/lib/netext/httpext" "github.com/mccutchen/go-httpbin/httpbin" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/http2" + + "github.com/loadimpact/k6/lib/netext" + "github.com/loadimpact/k6/lib/netext/httpext" ) // GetTLSClientConfig returns a TLS config that trusts the 
supplied diff --git a/lib/testutils/logrus_hook.go b/lib/testutils/logrus_hook.go index 751473184d2..5c41d855639 100644 --- a/lib/testutils/logrus_hook.go +++ b/lib/testutils/logrus_hook.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package testutils import ( diff --git a/lib/testutils/minirunner/minirunner.go b/lib/testutils/minirunner/minirunner.go new file mode 100644 index 00000000000..6ca17adc406 --- /dev/null +++ b/lib/testutils/minirunner/minirunner.go @@ -0,0 +1,183 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + +package minirunner + +import ( + "context" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/stats" +) + +// Ensure mock implementations conform to the interfaces. +var ( + _ lib.Runner = &MiniRunner{} + _ lib.InitializedVU = &VU{} + _ lib.ActiveVU = &ActiveVU{} +) + +// MiniRunner partially implements the lib.Runner interface, but instead of +// using a real JS runtime, it allows us to directly specify the options and +// functions with Go code. +type MiniRunner struct { + Fn func(ctx context.Context, out chan<- stats.SampleContainer) error + SetupFn func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) + TeardownFn func(ctx context.Context, out chan<- stats.SampleContainer) error + + SetupData []byte + + NextVUID int64 + Group *lib.Group + Options lib.Options +} + +// MakeArchive isn't implemented, it always returns nil and is just here to +// satisfy the lib.Runner interface. +func (r MiniRunner) MakeArchive() *lib.Archive { + return nil +} + +// NewVU returns a new VU with an incremental ID. +func (r *MiniRunner) NewVU(id int64, out chan<- stats.SampleContainer) (lib.InitializedVU, error) { + return &VU{R: r, Out: out, ID: id}, nil +} + +// Setup calls the supplied mock setup() function, if present. +func (r *MiniRunner) Setup(ctx context.Context, out chan<- stats.SampleContainer) (err error) { + if fn := r.SetupFn; fn != nil { + r.SetupData, err = fn(ctx, out) + } + return +} + +// GetSetupData returns json representation of the setup data if setup() is +// specified and was ran, nil otherwise. +func (r MiniRunner) GetSetupData() []byte { + return r.SetupData +} + +// SetSetupData saves the externally supplied setup data as JSON in the runner. +func (r *MiniRunner) SetSetupData(data []byte) { + r.SetupData = data +} + +// Teardown calls the supplied mock teardown() function, if present. 
+func (r MiniRunner) Teardown(ctx context.Context, out chan<- stats.SampleContainer) error { + if fn := r.TeardownFn; fn != nil { + return fn(ctx, out) + } + return nil +} + +// GetDefaultGroup returns the default group. +func (r MiniRunner) GetDefaultGroup() *lib.Group { + if r.Group == nil { + r.Group = &lib.Group{} + } + return r.Group +} + +// IsExecutable satisfies lib.Runner, but is mocked for MiniRunner since +// it doesn't deal with JS. +func (r MiniRunner) IsExecutable(name string) bool { + return true +} + +// GetOptions returns the supplied options struct. +func (r MiniRunner) GetOptions() lib.Options { + return r.Options +} + +// SetOptions allows you to override the runner options. +func (r *MiniRunner) SetOptions(opts lib.Options) error { + r.Options = opts + return nil +} + +// VU is a mock VU, spawned by a MiniRunner. +type VU struct { + R *MiniRunner + Out chan<- stats.SampleContainer + ID int64 + Iteration int64 +} + +// ActiveVU holds a VU and its activation parameters +type ActiveVU struct { + *VU + *lib.VUActivationParams + busy chan struct{} +} + +// GetID returns the unique VU ID. +func (vu *VU) GetID() int64 { + return vu.ID +} + +// Activate the VU so it will be able to run code. +func (vu *VU) Activate(params *lib.VUActivationParams) lib.ActiveVU { + avu := &ActiveVU{ + VU: vu, + VUActivationParams: params, + busy: make(chan struct{}, 1), + } + + go func() { + <-params.RunContext.Done() + + // Wait for the VU to stop running, if it was, and prevent it from + // running again for this activation + avu.busy <- struct{}{} + + if params.DeactivateCallback != nil { + params.DeactivateCallback(vu) + } + }() + + return avu +} + +// RunOnce runs the mock default function once, incrementing its iteration. 
+func (vu *ActiveVU) RunOnce() error { + if vu.R.Fn == nil { + return nil + } + + select { + case <-vu.RunContext.Done(): + return vu.RunContext.Err() // we are done, return + case vu.busy <- struct{}{}: + // nothing else can run now, and the VU cannot be deactivated + } + defer func() { + <-vu.busy // unlock deactivation again + }() + + state := &lib.State{ + Vu: vu.ID, + Iteration: vu.Iteration, + } + newctx := lib.WithState(vu.RunContext, state) + + vu.Iteration++ + + return vu.R.Fn(newctx, vu.Out) +} diff --git a/lib/scheduler/interfaces.go b/lib/testutils/test_output.go similarity index 60% rename from lib/scheduler/interfaces.go rename to lib/testutils/test_output.go index 764df42ae85..67a8ea284c3 100644 --- a/lib/scheduler/interfaces.go +++ b/lib/testutils/test_output.go @@ -18,16 +18,24 @@ * */ -package scheduler +package testutils -import "time" +import ( + "io" + "testing" +) -// Config is an interface that should be implemented by all scheduler config types -type Config interface { - GetBaseConfig() BaseConfig - Validate() []error - GetMaxVUs() int64 - GetMaxDuration() time.Duration // includes max timeouts, to allow us to share VUs between schedulers in the future - //TODO: Split(percentages []float64) ([]Config, error) - //TODO: String() method that could be used for priting descriptions of the currently running schedulers for the UI? +// Something that makes the test also be a valid io.Writer, useful for passing it +// as an output for logs and CLI flag help messages... +type testOutput struct{ *testing.T } + +func (to testOutput) Write(p []byte) (n int, err error) { + to.Logf("%s", p) + return len(p), nil +} + +// NewTestOutput returns a simple io.Writer implementation that uses the test's +// logger as an output. 
+func NewTestOutput(t *testing.T) io.Writer { + return testOutput{t} } diff --git a/lib/timeout_error.go b/lib/timeout_error.go index 682fe000bf9..16cccded9e9 100644 --- a/lib/timeout_error.go +++ b/lib/timeout_error.go @@ -1,16 +1,30 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package lib import ( "fmt" "time" -) -//nolint:gochecknoglobals -// Keep stages in sync with js/runner.go -// We set it here to prevent import cycle. -var ( - stageSetup = "setup" - stageTeardown = "teardown" + "github.com/loadimpact/k6/lib/consts" ) // TimeoutError is used when somethings timeouts @@ -27,7 +41,7 @@ func NewTimeoutError(place string, d time.Duration) TimeoutError { // String returns timeout error in human readable format. func (t TimeoutError) String() string { - return fmt.Sprintf("%s execution timed out after %.f seconds", t.place, t.d.Seconds()) + return fmt.Sprintf("%s() execution timed out after %.f seconds", t.place, t.d.Seconds()) } // Error implements error interface. 
@@ -45,9 +59,9 @@ func (t TimeoutError) Hint() string { hint := "" switch t.place { - case stageSetup: + case consts.SetupFn: hint = "You can increase the time limit via the setupTimeout option" - case stageTeardown: + case consts.TeardownFn: hint = "You can increase the time limit via the teardownTimeout option" } return hint diff --git a/lib/timeout_error_test.go b/lib/timeout_error_test.go index 86232fd1dd8..99ae2d7b063 100644 --- a/lib/timeout_error_test.go +++ b/lib/timeout_error_test.go @@ -1,9 +1,31 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package lib import ( "strings" "testing" "time" + + "github.com/loadimpact/k6/lib/consts" ) func TestTimeoutError(t *testing.T) { @@ -11,8 +33,8 @@ func TestTimeoutError(t *testing.T) { stage, expectedStrContain string d time.Duration }{ - {"setup", "1 seconds", time.Second}, - {"teardown", "2 seconds", time.Second * 2}, + {consts.SetupFn, "1 seconds", time.Second}, + {consts.TeardownFn, "2 seconds", time.Second * 2}, {"", "0 seconds", time.Duration(0)}, } @@ -29,8 +51,8 @@ func TestTimeoutErrorHint(t *testing.T) { stage string empty bool }{ - {"setup", false}, - {"teardown", false}, + {consts.SetupFn, false}, + {consts.TeardownFn, false}, {"not handle", true}, } diff --git a/lib/types/types.go b/lib/types/types.go index b1c503f98b1..e9ffeab037f 100644 --- a/lib/types/types.go +++ b/lib/types/types.go @@ -25,9 +25,11 @@ import ( "encoding/json" "fmt" "reflect" + "strconv" + "strings" "time" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" ) // NullDecoder converts data with expected type f to a guregu/null value @@ -83,6 +85,8 @@ func NullDecoder(f reflect.Type, t reflect.Type, data interface{}) (interface{}, return data, nil } +//TODO: something better that won't require so much boilerplate and casts for NullDuration values... + // Duration is an alias for time.Duration that de/serialises to JSON as human-readable strings. type Duration time.Duration @@ -90,9 +94,38 @@ func (d Duration) String() string { return time.Duration(d).String() } +// ParseExtendedDuration is a helper function that allows for string duration +// values containing days. 
+func ParseExtendedDuration(data string) (result time.Duration, err error) { + dPos := strings.IndexByte(data, 'd') + if dPos < 0 { + return time.ParseDuration(data) + } + + var hours time.Duration + if dPos+1 < len(data) { // case "12d" + hours, err = time.ParseDuration(data[dPos+1:]) + if err != nil { + return + } + if hours < 0 { + return 0, fmt.Errorf("invalid time format '%s'", data[dPos+1:]) + } + } + + days, err := strconv.ParseInt(data[:dPos], 10, 64) + if err != nil { + return + } + if days < 0 { + hours = -hours + } + return time.Duration(days)*24*time.Hour + hours, nil +} + // UnmarshalText converts text data to Duration func (d *Duration) UnmarshalText(data []byte) error { - v, err := time.ParseDuration(string(data)) + v, err := ParseExtendedDuration(string(data)) if err != nil { return err } @@ -108,7 +141,7 @@ func (d *Duration) UnmarshalJSON(data []byte) error { return err } - v, err := time.ParseDuration(str) + v, err := ParseExtendedDuration(str) if err != nil { return err } diff --git a/lib/types/types_test.go b/lib/types/types_test.go index d98db6473f2..d54f6c474c8 100644 --- a/lib/types/types_test.go +++ b/lib/types/types_test.go @@ -23,13 +23,14 @@ package types import ( "encoding/json" "fmt" + "math" "testing" "time" "github.com/mitchellh/mapstructure" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" ) func TestNullDecoder(t *testing.T) { @@ -99,6 +100,55 @@ func TestNullDecoder(t *testing.T) { } } +func TestParseExtendedDuration(t *testing.T) { + testCases := []struct { + durStr string + expErr bool + expDur time.Duration + }{ + {"", true, 0}, + {"d", true, 0}, + {"d2h", true, 0}, + {"d2h", true, 0}, + {"2.1d", true, 0}, + {"2d-2h", true, 0}, + {"-2d-2h", true, 0}, + {"2+d", true, 0}, + {"2da", true, 0}, + {"2-d", true, 0}, + {"1.12s", false, 1120 * time.Millisecond}, + {"0d1.12s", false, 1120 * time.Millisecond}, + {"10d1.12s", false, 240*time.Hour + 
1120*time.Millisecond}, + {"1s", false, 1 * time.Second}, + {"1d", false, 24 * time.Hour}, + {"20d", false, 480 * time.Hour}, + {"1d23h", false, 47 * time.Hour}, + {"1d24h15m", false, 48*time.Hour + 15*time.Minute}, + {"1d25h80m", false, 50*time.Hour + 20*time.Minute}, + {"0d25h120m80s", false, 27*time.Hour + 80*time.Second}, + {"-1d2h", false, -26 * time.Hour}, + {"-1d24h", false, -48 * time.Hour}, + {"2d1ns", false, 48*time.Hour + 1}, + {"-2562047h47m16.854775807s", false, time.Duration(math.MinInt64 + 1)}, + {"-106751d23h47m16.854775807s", false, time.Duration(math.MinInt64 + 1)}, + {"2562047h47m16.854775807s", false, time.Duration(math.MaxInt64)}, + {"106751d23h47m16.854775807s", false, time.Duration(math.MaxInt64)}, + } + + for _, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("tc_%s_exp", tc.durStr), func(t *testing.T) { + result, err := ParseExtendedDuration(tc.durStr) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expDur, result) + } + }) + } +} + func TestDuration(t *testing.T) { t.Run("String", func(t *testing.T) { assert.Equal(t, "1m15s", Duration(75*time.Second).String()) @@ -120,6 +170,11 @@ func TestDuration(t *testing.T) { assert.NoError(t, json.Unmarshal([]byte(`"1m15s"`), &d)) assert.Equal(t, Duration(75*time.Second), d) }) + t.Run("Extended", func(t *testing.T) { + var d Duration + assert.NoError(t, json.Unmarshal([]byte(`"1d2h1m15s"`), &d)) + assert.Equal(t, Duration(26*time.Hour+75*time.Second), d) + }) }) t.Run("Marshal", func(t *testing.T) { d := Duration(75 * time.Second) diff --git a/lib/util.go b/lib/util.go index 60c6966e22e..2be0b770c5b 100644 --- a/lib/util.go +++ b/lib/util.go @@ -22,21 +22,8 @@ package lib import ( "strings" - - "github.com/loadimpact/k6/lib/types" ) -// Returns the total sum of time taken by the given set of stages. 
-func SumStages(stages []Stage) (d types.NullDuration) { - for _, stage := range stages { - d.Valid = stage.Duration.Valid - if stage.Duration.Valid { - d.Duration += stage.Duration.Duration - } - } - return d -} - // Splits a string in the form "key=value". func SplitKV(s string) (key, value string) { parts := strings.SplitN(s, "=", 2) diff --git a/lib/util_test.go b/lib/util_test.go index ea484bab297..119f43ba132 100644 --- a/lib/util_test.go +++ b/lib/util_test.go @@ -24,12 +24,12 @@ import ( "fmt" "strconv" "testing" - "time" - "github.com/loadimpact/k6/lib/types" "github.com/stretchr/testify/assert" ) +//TODO: update test +/* func TestSumStages(t *testing.T) { testdata := map[string]struct { Time types.NullDuration @@ -59,6 +59,7 @@ func TestSumStages(t *testing.T) { }) } } +*/ func TestSplitKV(t *testing.T) { testdata := map[string]struct { diff --git a/loader/filesystems.go b/loader/filesystems.go index 2bfde5c1205..e9e28a0ba38 100644 --- a/loader/filesystems.go +++ b/loader/filesystems.go @@ -1,10 +1,31 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package loader import ( "runtime" - "github.com/loadimpact/k6/lib/fsext" "github.com/spf13/afero" + + "github.com/loadimpact/k6/lib/fsext" ) // CreateFilesystems creates the correct filesystem map for the current OS diff --git a/loader/loader.go b/loader/loader.go index 3f73e378b7f..f72ae4b2773 100644 --- a/loader/loader.go +++ b/loader/loader.go @@ -171,8 +171,8 @@ func Load( ) (*SourceData, error) { logrus.WithFields( logrus.Fields{ - "moduleSpecifier": moduleSpecifier, - "original moduleSpecifier": originalModuleSpecifier, + "moduleSpecifier": moduleSpecifier, + "originalModuleSpecifier": originalModuleSpecifier, }).Debug("Loading...") var pathOnFs string diff --git a/loader/loader_test.go b/loader/loader_test.go index afb3b29ce81..f435b0b0fe9 100644 --- a/loader/loader_test.go +++ b/loader/loader_test.go @@ -27,11 +27,12 @@ import ( "path/filepath" "testing" - "github.com/loadimpact/k6/lib/testutils/httpmultibin" - "github.com/loadimpact/k6/loader" "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/loadimpact/k6/lib/testutils/httpmultibin" + "github.com/loadimpact/k6/loader" ) func TestDir(t *testing.T) { diff --git a/loader/readsource.go b/loader/readsource.go index 4def2c15582..2da35d081ae 100644 --- a/loader/readsource.go +++ b/loader/readsource.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package loader import ( @@ -8,8 +28,9 @@ import ( "net/url" "path/filepath" - "github.com/loadimpact/k6/lib/fsext" "github.com/spf13/afero" + + "github.com/loadimpact/k6/lib/fsext" ) // ReadSource Reads a source file from any supported destination. diff --git a/loader/readsource_test.go b/loader/readsource_test.go index e962f304f87..72c319ec3c0 100644 --- a/loader/readsource_test.go +++ b/loader/readsource_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package loader import ( @@ -6,10 +26,11 @@ import ( "net/url" "testing" - "github.com/loadimpact/k6/lib/fsext" "github.com/pkg/errors" "github.com/spf13/afero" "github.com/stretchr/testify/require" + + "github.com/loadimpact/k6/lib/fsext" ) type errorReader string diff --git a/stats/cloud/api.go b/stats/cloud/api.go index 684bf45a846..adc0ab8885f 100644 --- a/stats/cloud/api.go +++ b/stats/cloud/api.go @@ -29,8 +29,9 @@ import ( "net/http" "strconv" - "github.com/loadimpact/k6/lib" "github.com/pkg/errors" + + "github.com/loadimpact/k6/lib" ) type ResultStatus int diff --git a/stats/cloud/api_test.go b/stats/cloud/api_test.go index 3a7f234a5b1..840be9cda99 100644 --- a/stats/cloud/api_test.go +++ b/stats/cloud/api_test.go @@ -33,9 +33,10 @@ import ( "testing" "time" - "github.com/loadimpact/k6/lib/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/loadimpact/k6/lib/types" ) func init() { diff --git a/stats/cloud/bench_test.go b/stats/cloud/bench_test.go index eb9b2e7d2a0..72b62e383fd 100644 --- a/stats/cloud/bench_test.go +++ b/stats/cloud/bench_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package cloud import ( @@ -11,16 +31,16 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/netext/httpext" "github.com/loadimpact/k6/lib/testutils/httpmultibin" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/loader" "github.com/loadimpact/k6/stats" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "gopkg.in/guregu/null.v3" ) // script to clean the logs: `perl -p -e "s/time=\".*\n//g"` @@ -59,7 +79,7 @@ func BenchmarkCloud(b *testing.B) { Host: null.StringFrom(tb.ServerHTTP.URL), NoCompress: null.BoolFrom(true), }) - collector, err := New(config, script, options, "1.0") + collector, err := New(config, script, options, []lib.ExecutionStep{}, "1.0") require.NoError(b, err) now := time.Now() tags := stats.IntoSampleTags(&map[string]string{"test": "mest", "a": "b", "url": "something", "name": "else"}) diff --git a/stats/cloud/collector.go b/stats/cloud/collector.go index adc0be8e39c..013c3eb0403 100644 --- a/stats/cloud/collector.go +++ b/stats/cloud/collector.go @@ -48,9 +48,10 @@ type Collector struct { config Config referenceID string - duration int64 - thresholds map[string][]*stats.Threshold - client *Client + executionPlan []lib.ExecutionStep + duration int64 // in seconds + thresholds map[string][]*stats.Threshold + client *Client anonymous bool runStatus lib.RunStatus @@ -100,7 +101,9 @@ func MergeFromExternal(external map[string]json.RawMessage, conf *Config) error } // New creates a new cloud collector -func New(conf Config, src *loader.SourceData, opts lib.Options, version string) (*Collector, error) { +func New( + conf Config, src *loader.SourceData, opts lib.Options, executionPlan []lib.ExecutionStep, version string, +) (*Collector, error) { if err := MergeFromExternal(opts.External, &conf); err != nil { return nil, err } @@ -121,16 +124,9 @@ func 
New(conf Config, src *loader.SourceData, opts lib.Options, version string) thresholds[name] = append(thresholds[name], t.Thresholds...) } - // Sum test duration from options. -1 for unknown duration. - var duration int64 = -1 - if len(opts.Stages) > 0 { - duration = sumStages(opts.Stages) - } else if opts.Duration.Valid { - duration = int64(time.Duration(opts.Duration.Duration).Seconds()) - } - - if duration == -1 { - return nil, errors.New("Tests with unspecified duration are not allowed when using Load Impact Insights") + duration, testEnds := lib.GetEndOffset(executionPlan) + if !testEnds { + return nil, errors.New("tests with unspecified duration are not allowed when outputting data to k6 cloud") } if !conf.Token.Valid && conf.DeprecatedToken.Valid { @@ -143,7 +139,8 @@ func New(conf Config, src *loader.SourceData, opts lib.Options, version string) thresholds: thresholds, client: NewClient(conf.Token.String, conf.Host.String, version), anonymous: !conf.Token.Valid, - duration: duration, + executionPlan: executionPlan, + duration: int64(duration / time.Second), opts: opts, aggrBuckets: map[int64]aggregationBucket{}, stopSendingMetricsCh: make(chan struct{}), @@ -153,6 +150,12 @@ func New(conf Config, src *loader.SourceData, opts lib.Options, version string) // Init is called between the collector's creation and the call to Run(). // You should do any lengthy setup here rather than in New. 
func (c *Collector) Init() error { + if c.config.PushRefID.Valid { + c.referenceID = c.config.PushRefID.String + logrus.WithField("referenceId", c.referenceID).Debug("Cloud: directly pushing metrics without init") + return nil + } + thresholds := make(map[string][]string) for name, t := range c.thresholds { @@ -160,11 +163,12 @@ func (c *Collector) Init() error { thresholds[name] = append(thresholds[name], threshold.Source) } } + maxVUs := lib.GetMaxPossibleVUs(c.executionPlan) testRun := &TestRun{ Name: c.config.Name.String, ProjectID: c.config.ProjectID.Int64, - VUsMax: c.opts.VUsMax.Int64, + VUsMax: int64(maxVUs), Thresholds: thresholds, Duration: c.duration, } @@ -295,7 +299,7 @@ func (c *Collector) Collect(sampleContainers []stats.SampleContainer) { newSamples = append(newSamples, NewSampleFromTrail(sc)) } case *netext.NetTrail: - //TODO: aggregate? + // TODO: aggregate? values := map[string]float64{ metrics.DataSent.Name: float64(sc.BytesWritten), metrics.DataReceived.Name: float64(sc.BytesRead), @@ -313,7 +317,8 @@ func (c *Collector) Collect(sampleContainers []stats.SampleContainer) { Time: Timestamp(sc.GetTime()), Tags: sc.GetTags(), Values: values, - }}) + }, + }) default: for _, sample := range sampleContainer.GetSamples() { newSamples = append(newSamples, &Sample{ @@ -511,7 +516,7 @@ func (c *Collector) pushMetrics() { }).Debug("Pushing metrics to cloud") for len(buffer) > 0 { - var size = len(buffer) + size := len(buffer) if size > int(c.config.MaxMetricSamplesPerPackage.Int64) { size = int(c.config.MaxMetricSamplesPerPackage.Int64) } @@ -529,7 +534,7 @@ func (c *Collector) pushMetrics() { } func (c *Collector) testFinished() { - if c.referenceID == "" { + if c.referenceID == "" || c.config.PushRefID.Valid { return } @@ -563,15 +568,6 @@ func (c *Collector) testFinished() { } } -func sumStages(stages []lib.Stage) int64 { - var total time.Duration - for _, stage := range stages { - total += time.Duration(stage.Duration.Duration) - } - - return 
int64(total.Seconds()) -} - // GetRequiredSystemTags returns which sample tags are needed by this collector func (c *Collector) GetRequiredSystemTags() stats.SystemTagSet { return stats.TagName | stats.TagMethod | stats.TagStatus | stats.TagError | stats.TagCheck | stats.TagGroup diff --git a/stats/cloud/collector_test.go b/stats/cloud/collector_test.go index 2b44f47fcae..250409962cb 100644 --- a/stats/cloud/collector_test.go +++ b/stats/cloud/collector_test.go @@ -33,11 +33,9 @@ import ( "testing" "time" - "gopkg.in/guregu/null.v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/lib/metrics" @@ -186,7 +184,7 @@ func runCloudCollectorTestCase(t *testing.T, minSamples int) { Host: null.StringFrom(tb.ServerHTTP.URL), NoCompress: null.BoolFrom(true), }) - collector, err := New(config, script, options, "1.0") + collector, err := New(config, script, options, []lib.ExecutionStep{}, "1.0") require.NoError(t, err) assert.True(t, collector.config.Host.Valid) @@ -214,6 +212,7 @@ func runCloudCollectorTestCase(t *testing.T, minSamples int) { expectedTags := stats.IntoSampleTags(&expectedTagMap) expSamples := make(chan []Sample) + defer close(expSamples) tb.Mux.HandleFunc(fmt.Sprintf("/v1/metrics/%s", collector.referenceID), getSampleChecker(t, expSamples)) tb.Mux.HandleFunc(fmt.Sprintf("/v1/tests/%s", collector.referenceID), func(rw http.ResponseWriter, _ *http.Request) { rw.WriteHeader(http.StatusOK) // silence a test warning @@ -343,7 +342,7 @@ func TestCloudCollectorMaxPerPacket(t *testing.T) { Host: null.StringFrom(tb.ServerHTTP.URL), NoCompress: null.BoolFrom(true), }) - collector, err := New(config, script, options, "1.0") + collector, err := New(config, script, options, []lib.ExecutionStep{}, "1.0") require.NoError(t, err) now := time.Now() tags := stats.IntoSampleTags(&map[string]string{"test": "mest", "a": "b"}) @@ -432,10 +431,11 @@ func 
TestCloudCollectorStopSendingMetric(t *testing.T) { } config := NewConfig().Apply(Config{ - Host: null.StringFrom(tb.ServerHTTP.URL), - NoCompress: null.BoolFrom(true), + Host: null.StringFrom(tb.ServerHTTP.URL), + NoCompress: null.BoolFrom(true), + MaxMetricSamplesPerPackage: null.IntFrom(50), }) - collector, err := New(config, script, options, "1.0") + collector, err := New(config, script, options, []lib.ExecutionStep{}, "1.0") require.NoError(t, err) now := time.Now() tags := stats.IntoSampleTags(&map[string]string{"test": "mest", "a": "b"}) @@ -504,8 +504,12 @@ func TestCloudCollectorStopSendingMetric(t *testing.T) { cancel() wg.Wait() require.Equal(t, lib.RunStatusQueued, collector.runStatus) - _, ok := <-collector.stopSendingMetricsCh - require.False(t, ok) + select { + case <-collector.stopSendingMetricsCh: + // all is fine + default: + t.Fatal("sending metrics wasn't stopped") + } require.Equal(t, max, count) nBufferSamples := len(collector.bufferSamples) @@ -551,7 +555,7 @@ func TestCloudCollectorAggregationPeriodZeroNoBlock(t *testing.T) { Host: null.StringFrom(tb.ServerHTTP.URL), NoCompress: null.BoolFrom(true), }) - collector, err := New(config, script, options, "1.0") + collector, err := New(config, script, options, []lib.ExecutionStep{}, "1.0") require.NoError(t, err) assert.True(t, collector.config.Host.Valid) @@ -572,6 +576,7 @@ func TestCloudCollectorAggregationPeriodZeroNoBlock(t *testing.T) { assert.Equal(t, types.Duration(5*time.Millisecond), collector.config.AggregationWaitPeriod.Duration) expSamples := make(chan []Sample) + defer close(expSamples) tb.Mux.HandleFunc(fmt.Sprintf("/v1/metrics/%s", collector.referenceID), getSampleChecker(t, expSamples)) ctx, cancel := context.WithCancel(context.Background()) @@ -609,7 +614,7 @@ func TestCloudCollectorRecvIterLIAllIterations(t *testing.T) { Host: null.StringFrom(tb.ServerHTTP.URL), NoCompress: null.BoolFrom(true), }) - collector, err := New(config, script, options, "1.0") + collector, err := 
New(config, script, options, []lib.ExecutionStep{}, "1.0") require.NoError(t, err) var gotIterations = false @@ -724,7 +729,7 @@ func TestNewName(t *testing.T) { } collector, err := New(NewConfig(), script, lib.Options{ Duration: types.NullDurationFrom(1 * time.Second), - }, "1.0") + }, []lib.ExecutionStep{}, "1.0") require.NoError(t, err) require.Equal(t, collector.config.Name.String, testCase.expected) }) diff --git a/stats/cloud/config.go b/stats/cloud/config.go index 21ecc85e90b..73a8e0cdc5f 100644 --- a/stats/cloud/config.go +++ b/stats/cloud/config.go @@ -23,8 +23,9 @@ package cloud import ( "time" - "github.com/loadimpact/k6/lib/types" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib/types" ) // Config holds all the necessary data and options for sending metrics to the Load Impact cloud. @@ -37,6 +38,7 @@ type Config struct { Name null.String `json:"name" envconfig:"K6_CLOUD_NAME"` Host null.String `json:"host" envconfig:"K6_CLOUD_HOST"` + PushRefID null.String `json:"pushRefID" envconfig:"K6_CLOUD_PUSH_REF_ID"` WebAppURL null.String `json:"webAppURL" envconfig:"K6_CLOUD_WEB_APP_URL"` NoCompress null.Bool `json:"noCompress" envconfig:"K6_CLOUD_NO_COMPRESS"` diff --git a/stats/cloud/data_test.go b/stats/cloud/data_test.go index 53486e6d601..eb9dda7de64 100644 --- a/stats/cloud/data_test.go +++ b/stats/cloud/data_test.go @@ -27,11 +27,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/loadimpact/k6/lib/metrics" "github.com/loadimpact/k6/lib/netext/httpext" "github.com/loadimpact/k6/stats" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestTimestampMarshaling(t *testing.T) { diff --git a/stats/csv/collector.go b/stats/csv/collector.go index 0f27768da5a..4669d959012 100644 --- a/stats/csv/collector.go +++ b/stats/csv/collector.go @@ -31,10 +31,11 @@ import ( "sync" "time" - "github.com/loadimpact/k6/lib" - "github.com/loadimpact/k6/stats" 
"github.com/sirupsen/logrus" "github.com/spf13/afero" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/stats" ) // Collector saving output to csv implements the lib.Collector interface diff --git a/stats/csv/collector_test.go b/stats/csv/collector_test.go index 149eb714d43..5520d0f89a6 100644 --- a/stats/csv/collector_test.go +++ b/stats/csv/collector_test.go @@ -28,13 +28,12 @@ import ( "testing" "time" + "github.com/spf13/afero" + "github.com/stretchr/testify/assert" "gopkg.in/guregu/null.v3" "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/stats" - - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" ) func TestMakeHeader(t *testing.T) { diff --git a/stats/csv/config.go b/stats/csv/config.go index 87b5f626ae0..95914d2a749 100644 --- a/stats/csv/config.go +++ b/stats/csv/config.go @@ -25,8 +25,9 @@ import ( "strings" "time" - "github.com/loadimpact/k6/lib/types" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib/types" ) // Config is the config for the csv collector diff --git a/stats/datadog/collector_test.go b/stats/datadog/collector_test.go index 205e7279a5a..e1ff04e42f6 100644 --- a/stats/datadog/collector_test.go +++ b/stats/datadog/collector_test.go @@ -1,13 +1,34 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package datadog import ( "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/loadimpact/k6/stats" "github.com/loadimpact/k6/stats/statsd/common" "github.com/loadimpact/k6/stats/statsd/common/testutil" - "github.com/stretchr/testify/require" ) func TestCollector(t *testing.T) { diff --git a/stats/dummy/collector.go b/stats/dummy/collector.go index 25534113795..fa147c699ab 100644 --- a/stats/dummy/collector.go +++ b/stats/dummy/collector.go @@ -23,8 +23,6 @@ package dummy import ( "context" - "github.com/sirupsen/logrus" - "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/stats" ) @@ -49,7 +47,6 @@ func (c *Collector) MakeConfig() interface{} { return nil } // Run just blocks until the context is done func (c *Collector) Run(ctx context.Context) { <-ctx.Done() - logrus.Debugf("finished status: %d", c.RunStatus) } // Collect just appends all of the samples passed to it to the internal sample slice. diff --git a/stats/influxdb/bench_test.go b/stats/influxdb/bench_test.go index e3f3b046fa8..4fc01093bd2 100644 --- a/stats/influxdb/bench_test.go +++ b/stats/influxdb/bench_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package influxdb import ( diff --git a/stats/influxdb/collector.go b/stats/influxdb/collector.go index 86f8430d3d7..ac32db97643 100644 --- a/stats/influxdb/collector.go +++ b/stats/influxdb/collector.go @@ -27,9 +27,10 @@ import ( "time" client "github.com/influxdata/influxdb1-client/v2" + "github.com/sirupsen/logrus" + "github.com/loadimpact/k6/lib" "github.com/loadimpact/k6/stats" - "github.com/sirupsen/logrus" ) // Verify that Collector implements lib.Collector diff --git a/stats/influxdb/collector_test.go b/stats/influxdb/collector_test.go index 61e0b0edc98..42b33f8d8a2 100644 --- a/stats/influxdb/collector_test.go +++ b/stats/influxdb/collector_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package influxdb import ( @@ -10,9 +30,10 @@ import ( "testing" "time" - "github.com/loadimpact/k6/stats" "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/stats" ) func TestBadConcurrentWrites(t *testing.T) { diff --git a/stats/influxdb/config.go b/stats/influxdb/config.go index 794a4ae43a0..fe4416bb06b 100644 --- a/stats/influxdb/config.go +++ b/stats/influxdb/config.go @@ -27,10 +27,11 @@ import ( "time" "github.com/kubernetes/helm/pkg/strvals" - "github.com/loadimpact/k6/lib/types" "github.com/mitchellh/mapstructure" "github.com/pkg/errors" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib/types" ) type Config struct { diff --git a/stats/influxdb/config_test.go b/stats/influxdb/config_test.go index 115831eedf2..773815fb1c0 100644 --- a/stats/influxdb/config_test.go +++ b/stats/influxdb/config_test.go @@ -24,7 +24,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" ) func TestParseArg(t *testing.T) { diff --git a/stats/influxdb/util.go b/stats/influxdb/util.go index eeaa31cf4c9..172c008d7dd 100644 --- a/stats/influxdb/util.go +++ b/stats/influxdb/util.go @@ -24,7 +24,7 @@ import ( "strings" client "github.com/influxdata/influxdb1-client/v2" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" ) func MakeClient(conf Config) (client.Client, error) { diff --git a/stats/influxdb/util_test.go b/stats/influxdb/util_test.go index 29bef1b7e0f..2653752d35b 100644 --- a/stats/influxdb/util_test.go +++ b/stats/influxdb/util_test.go @@ -25,7 +25,7 @@ import ( client "github.com/influxdata/influxdb1-client/v2" "github.com/stretchr/testify/assert" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" ) func TestMakeBatchConfig(t *testing.T) { diff --git a/stats/json/collector.go b/stats/json/collector.go index ee1ef04772f..291679465c6 100644 --- 
a/stats/json/collector.go +++ b/stats/json/collector.go @@ -98,8 +98,9 @@ func (c *Collector) Init() error { func (c *Collector) SetRunStatus(status lib.RunStatus) {} func (c *Collector) Run(ctx context.Context) { + const timeout = 200 logrus.Debug("JSON output: Running!") - ticker := time.NewTicker(time.Millisecond * 100) + ticker := time.NewTicker(time.Millisecond * timeout) defer func() { _ = c.closeFn() }() @@ -142,7 +143,6 @@ func (c *Collector) commit() { samples := c.buffer c.buffer = nil c.bufferLock.Unlock() - logrus.WithField("filename", c.fname).Debug("JSON: Writing JSON metrics") var start = time.Now() var count int for _, sc := range samples { @@ -160,8 +160,10 @@ func (c *Collector) commit() { } } } - logrus.WithField("filename", c.fname).WithField("t", time.Since(start)). - WithField("count", count).Debug("JSON: Wrote JSON metrics") + if count > 0 { + logrus.WithField("filename", c.fname).WithField("t", time.Since(start)). + WithField("count", count).Debug("JSON: Wrote JSON metrics") + } } func (c *Collector) Link() string { diff --git a/stats/json/wrapper_test.go b/stats/json/wrapper_test.go index 490719533d0..c50e8dadc7a 100644 --- a/stats/json/wrapper_test.go +++ b/stats/json/wrapper_test.go @@ -23,8 +23,9 @@ package json import ( "testing" - "github.com/loadimpact/k6/stats" "github.com/stretchr/testify/assert" + + "github.com/loadimpact/k6/stats" ) func TestWrapersWithNilArg(t *testing.T) { diff --git a/stats/kafka/collector_test.go b/stats/kafka/collector_test.go index 0452857950c..1db7eae0b8b 100644 --- a/stats/kafka/collector_test.go +++ b/stats/kafka/collector_test.go @@ -26,9 +26,10 @@ import ( "testing" "github.com/Shopify/sarama" - "github.com/loadimpact/k6/stats" "github.com/stretchr/testify/assert" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/stats" ) func TestRun(t *testing.T) { diff --git a/stats/kafka/config.go b/stats/kafka/config.go index 49941186d61..9d391155e5e 100644 --- a/stats/kafka/config.go +++ 
b/stats/kafka/config.go @@ -24,10 +24,11 @@ import ( "time" "github.com/kubernetes/helm/pkg/strvals" - "github.com/loadimpact/k6/lib/types" - "github.com/loadimpact/k6/stats/influxdb" "github.com/mitchellh/mapstructure" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/lib/types" + "github.com/loadimpact/k6/stats/influxdb" ) // Config is the config for the kafka collector diff --git a/stats/kafka/config_test.go b/stats/kafka/config_test.go index cbced1f7e5d..e9bba9ff637 100644 --- a/stats/kafka/config_test.go +++ b/stats/kafka/config_test.go @@ -23,9 +23,10 @@ package kafka import ( "testing" - "github.com/loadimpact/k6/stats/influxdb" "github.com/stretchr/testify/assert" "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/stats/influxdb" ) func TestConfigParseArg(t *testing.T) { diff --git a/stats/stats.go b/stats/stats.go index 7bfe04782eb..d1fdc6731b5 100644 --- a/stats/stats.go +++ b/stats/stats.go @@ -252,11 +252,12 @@ func (st *SampleTags) UnmarshalJSON(data []byte) error { // CloneTags copies the underlying set of a sample tags and // returns it. If the receiver is nil, it returns an empty non-nil map. 
func (st *SampleTags) CloneTags() map[string]string { - res := map[string]string{} - if st != nil { - for k, v := range st.tags { - res[k] = v - } + if st == nil { + return map[string]string{} + } + res := make(map[string]string, len(st.tags)) + for k, v := range st.tags { + res[k] = v } return res } diff --git a/stats/statsd/collector_test.go b/stats/statsd/collector_test.go index e5947861a09..10745cc70f0 100644 --- a/stats/statsd/collector_test.go +++ b/stats/statsd/collector_test.go @@ -1,11 +1,32 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package statsd import ( "testing" + "github.com/stretchr/testify/require" + "github.com/loadimpact/k6/stats" "github.com/loadimpact/k6/stats/statsd/common/testutil" - "github.com/stretchr/testify/require" ) func TestCollector(t *testing.T) { diff --git a/stats/statsd/common/collector_test.go b/stats/statsd/common/collector_test.go index 5804063343a..2b94d468862 100644 --- a/stats/statsd/common/collector_test.go +++ b/stats/statsd/common/collector_test.go @@ -1,12 +1,32 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package common import ( "testing" - "github.com/loadimpact/k6/stats" - "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" + "gopkg.in/guregu/null.v3" + + "github.com/loadimpact/k6/stats" ) func TestInitWithoutAddressErrors(t *testing.T) { diff --git a/stats/statsd/common/config.go b/stats/statsd/common/config.go index 5d7518bed58..a640f61568b 100644 --- a/stats/statsd/common/config.go +++ b/stats/statsd/common/config.go @@ -23,8 +23,9 @@ package common import ( "time" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/lib/types" - null "gopkg.in/guregu/null.v3" ) // Config defines the statsd configuration diff --git a/stats/statsd/common/testutil/test_helper.go b/stats/statsd/common/testutil/test_helper.go index b7c49e53a1b..85b43f0adc1 100644 --- a/stats/statsd/common/testutil/test_helper.go +++ b/stats/statsd/common/testutil/test_helper.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ * + */ + package testutil import ( @@ -6,11 +26,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v3" + "github.com/loadimpact/k6/lib/types" "github.com/loadimpact/k6/stats" "github.com/loadimpact/k6/stats/statsd/common" - "github.com/stretchr/testify/require" - null "gopkg.in/guregu/null.v3" ) // BaseTest is a helper function to test statsd/datadog collector throughtly diff --git a/stats/system_tag.go b/stats/system_tag.go index fac1d0dda4d..9d98f64a366 100644 --- a/stats/system_tag.go +++ b/stats/system_tag.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package stats import ( @@ -29,6 +49,7 @@ const ( TagError TagErrorCode TagTLSVersion + TagScenario // System tags not enabled by default. TagIter @@ -41,7 +62,7 @@ const ( // Other tags that are not enabled by default include: iter, vu, ocsp_status, ip //nolint:gochecknoglobals var DefaultSystemTagSet = TagProto | TagSubproto | TagStatus | TagMethod | TagURL | TagName | TagGroup | - TagCheck | TagCheck | TagError | TagErrorCode | TagTLSVersion + TagCheck | TagCheck | TagError | TagErrorCode | TagTLSVersion | TagScenario // Add adds a tag to tag set. 
func (i *SystemTagSet) Add(tag SystemTagSet) { diff --git a/stats/system_tag_set_gen.go b/stats/system_tag_set_gen.go index c4f12aa872c..5514e31cf94 100644 --- a/stats/system_tag_set_gen.go +++ b/stats/system_tag_set_gen.go @@ -7,7 +7,7 @@ import ( "fmt" ) -const _SystemTagSetName = "protosubprotostatusmethodurlnamegroupcheckerrorerror_codetls_versionitervuocsp_statusip" +const _SystemTagSetName = "protosubprotostatusmethodurlnamegroupcheckerrorerror_codetls_versionscenarioitervuocsp_statusip" var _SystemTagSetMap = map[SystemTagSet]string{ 1: _SystemTagSetName[0:5], @@ -21,10 +21,11 @@ var _SystemTagSetMap = map[SystemTagSet]string{ 256: _SystemTagSetName[42:47], 512: _SystemTagSetName[47:57], 1024: _SystemTagSetName[57:68], - 2048: _SystemTagSetName[68:72], - 4096: _SystemTagSetName[72:74], - 8192: _SystemTagSetName[74:85], - 16384: _SystemTagSetName[85:87], + 2048: _SystemTagSetName[68:76], + 4096: _SystemTagSetName[76:80], + 8192: _SystemTagSetName[80:82], + 16384: _SystemTagSetName[82:93], + 32768: _SystemTagSetName[93:95], } func (i SystemTagSet) String() string { @@ -34,7 +35,7 @@ func (i SystemTagSet) String() string { return fmt.Sprintf("SystemTagSet(%d)", i) } -var _SystemTagSetValues = []SystemTagSet{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384} +var _SystemTagSetValues = []SystemTagSet{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768} var _SystemTagSetNameToValueMap = map[string]SystemTagSet{ _SystemTagSetName[0:5]: 1, @@ -48,10 +49,11 @@ var _SystemTagSetNameToValueMap = map[string]SystemTagSet{ _SystemTagSetName[42:47]: 256, _SystemTagSetName[47:57]: 512, _SystemTagSetName[57:68]: 1024, - _SystemTagSetName[68:72]: 2048, - _SystemTagSetName[72:74]: 4096, - _SystemTagSetName[74:85]: 8192, - _SystemTagSetName[85:87]: 16384, + _SystemTagSetName[68:76]: 2048, + _SystemTagSetName[76:80]: 4096, + _SystemTagSetName[80:82]: 8192, + _SystemTagSetName[82:93]: 16384, + _SystemTagSetName[93:95]: 32768, } // 
SystemTagSetString retrieves an enum value from the enum constants string name. diff --git a/stats/system_tag_test.go b/stats/system_tag_test.go index 64de86433af..27c98e3fd17 100644 --- a/stats/system_tag_test.go +++ b/stats/system_tag_test.go @@ -1,3 +1,23 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + * + */ + package stats import ( diff --git a/stats/thresholds_test.go b/stats/thresholds_test.go index 127407dbc6f..5c91dccf8de 100644 --- a/stats/thresholds_test.go +++ b/stats/thresholds_test.go @@ -26,8 +26,9 @@ import ( "time" "github.com/dop251/goja" - "github.com/loadimpact/k6/lib/types" "github.com/stretchr/testify/assert" + + "github.com/loadimpact/k6/lib/types" ) func TestNewThreshold(t *testing.T) { diff --git a/ui/pb/helpers.go b/ui/pb/helpers.go new file mode 100644 index 00000000000..58edadae094 --- /dev/null +++ b/ui/pb/helpers.go @@ -0,0 +1,144 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. 
// GetFixedLengthIntFormat returns a "%0__d" format argument for fmt functions
// that will produce a base-10 right-aligned zero-padded string representation
// of the supplied integer value. The number of characters (i.e. the actual
// number + how many zeros it will be padded on the left with) in the returned
// string corresponds to the number of digits in the supplied maxValue.
func GetFixedLengthIntFormat(maxValue int64) (formatStr string) {
	resLen := 1 // at least one digit is always printed
	if maxValue < 0 {
		resLen++ // one extra character for the minus sign
	}
	// Count the remaining base-10 digits of maxValue.
	for maxValue /= 10; maxValue != 0; maxValue /= 10 {
		resLen++
	}
	return "%0" + strconv.Itoa(resLen) + "d"
}

// GetFixedLengthFloatFormat returns "%0__.__f" format argument for fmt
// functions that will produce a base-10 right-aligned zero-padded string
// representation of the supplied float value, with the specified decimal
// precision. The number of characters (i.e. the actual number + maybe dot and
// precision + how many zeros it will be padded on the left with) in the
// returned string corresponds to the number of digits in the supplied maxValue
// and the desired precision.
func GetFixedLengthFloatFormat(maxValue float64, precision uint) (formatStr string) {
	resLen := 1 // at least one integral digit is always printed
	if maxValue < 0 {
		maxValue = -maxValue
		resLen++ // one extra character for the minus sign
	}
	if maxValue >= 10 {
		// log10 gives the number of integral digits beyond the first one
		resLen += int(math.Log10(maxValue))
	}
	if precision > 0 {
		resLen += int(precision + 1) // the fractional digits plus the dot
	}
	return "%0" + strconv.Itoa(resLen) + "." + strconv.Itoa(int(precision)) + "f"
}

// GetFixedLengthDuration takes a *positive* duration and its max value and
// returns a string with a fixed width so we can prevent UI elements jumping
// around. The format is "___d__h__m__s.s", but leading values can be omitted
// based on the maxDuration value, the results can be: "___h__m__s.s".
//
// This code was inspired by the Go stdlib's time.Duration.String() code.
// TODO: more flexibility - negative values or variable precision?
func GetFixedLengthDuration(d, maxDuration time.Duration) (result string) {
	const rounding = 100 * time.Millisecond
	// Normalize both values to be positive, and make sure the max is
	// actually >= d, so the width calculations below are consistent.
	if d < 0 {
		d = -d
	}
	if maxDuration < 0 {
		maxDuration = -maxDuration
	}
	if maxDuration < d {
		maxDuration = d
	}
	maxDuration = maxDuration.Round(rounding)

	// Largest time is "106751d23h47m16.9s", i.e. time.Duration(math.MaxInt64)
	// Positions: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
	buf := [18]byte{'0', '0', '0', '0', '0', '0', 'd', '0', '0', 'h', '0', '0', 'm', '0', '0', '.', '0', 's'}

	// Fill the buffer from the right (smallest unit) to the left, emitting
	// early as soon as maxDuration tells us no larger units are needed.
	u := uint64(d.Round(rounding) / rounding)
	u, buf[16] = u/10, byte(u%10)+'0' // tenths of a second
	u, buf[14] = u/10, byte(u%10)+'0' // seconds, ones digit
	if maxDuration < 10*time.Second {
		return string(buf[14:])
	}

	u, buf[13] = u/6, byte(u%6)+'0' // seconds, tens digit (0-5)
	if maxDuration < time.Minute {
		return string(buf[13:])
	}

	u, buf[11] = u/10, byte(u%10)+'0' // minutes, ones digit
	if maxDuration < 10*time.Minute {
		return string(buf[11:])
	}

	u, buf[10] = u/6, byte(u%6)+'0' // minutes, tens digit (0-5)
	if maxDuration < time.Hour {
		return string(buf[10:])
	}

	u, h := u/24, u%24 // split off the hours within the day
	buf[7], buf[8] = byte(h/10)+'0', byte(h%10)+'0'
	if maxDuration < 10*time.Hour {
		return string(buf[8:])
	} else if maxDuration < 24*time.Hour {
		return string(buf[7:])
	}

	// Days: emit as many digits as maxDuration requires (up to 6).
	u, buf[5] = u/10, byte(u%10)+'0'
	remDayPowers := maxDuration / (240 * time.Hour)
	i := 5
	for remDayPowers > 0 {
		i--
		u, buf[i] = u/10, byte(u%10)+'0'
		remDayPowers /= 10
	}

	return string(buf[i:])
}

// Clampf returns the given value, "clamped" to the range [min, max].
// This is copied from lib/util.go to avoid circular imports.
func Clampf(val, min, max float64) float64 {
	switch {
	case val < min:
		return min
	case val > max:
		return max
	default:
		return val
	}
}
// TestGetFixedLengthInt checks that the format string produced by
// GetFixedLengthIntFormat pads values to the width of maxVal, and that the
// padded output still round-trips through strconv.ParseInt.
func TestGetFixedLengthInt(t *testing.T) {
	testCases := []struct {
		val, maxVal int64
		expRes      string
	}{
		{1, 0, "1"},
		{1, 1, "1"},
		{1, 5, "1"},
		{111, 5, "111"},
		{-1, 5, "-1"},
		{-1, -50, "-01"},
		{-1, 50, "-1"},

		// NOTE(review): the next entry is duplicated in the original table.
		{1, 15, "01"},
		{1, 15, "01"},
		{1, 150, "001"},
		{1, 1500, "0001"},
		{999, 1500, "0999"},
		{-999, 1500, "-999"},
		{-9999, 1500, "-9999"},
		{1, 10000, "00001"},
		{1234567, 10000, "1234567"},
		{123456790, math.MaxInt64, "0000000000123456790"},
		{-123456790, math.MaxInt64, "-000000000123456790"},
		{math.MaxInt64, math.MaxInt64, "9223372036854775807"},
		{-123456790, math.MinInt64, "-0000000000123456790"},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable for the parallel-safe closure
		t.Run(tc.expRes, func(t *testing.T) {
			fmtFormat := GetFixedLengthIntFormat(tc.maxVal)
			res := fmt.Sprintf(fmtFormat, tc.val)
			assert.Equal(t, tc.expRes, res)
			// Padding must not change the parsed numeric value.
			back, err := strconv.ParseInt(res, 10, 64)
			require.NoError(t, err)
			assert.Equal(t, tc.val, back)
		})
	}
}

// TestGetFixedLengthFloat checks zero-padding and decimal precision of the
// format produced by GetFixedLengthFloatFormat, and that the rendered value
// parses back to val rounded to the requested precision.
func TestGetFixedLengthFloat(t *testing.T) {
	testCases := []struct {
		val, maxVal float64
		precision   uint
		expRes      string
	}{
		{0, 0, 0, "0"},
		{0, 0, 2, "0.00"},
		{0, 100, 2, "000.00"},
		{0, -100, 2, "0000.00"},
		{12, -100, 2, "0012.00"},
		{-12, -100, 2, "-012.00"},
		{12, 99, 2, "12.00"},
		{12, 100, 2, "012.00"},
		{1, 0, 0, "1"},
		{1, 0, 1, "1.0"},
		{1, 0, 2, "1.00"},
		{1.01, 0, 1, "1.0"},
		{1.01, 0, 1, "1.0"},
		{1.01, 0, 2, "1.01"},
		{1.007, 0, 2, "1.01"},
		{1.003, 0, 2, "1.00"},
		{1.003, 0, 3, "1.003"},
		{1.003, 0, 4, "1.0030"},
		{1.003, 1, 4, "1.0030"},
		{1.003, 9.999, 4, "1.0030"},
		{1.003, 10, 4, "01.0030"},
		{1.003, -10, 4, "001.0030"},
		{-1.003, -10, 4, "-01.0030"},
		{12.003, 1000, 4, "0012.0030"},
	}

	for i, tc := range testCases {
		tc := tc // capture range variable
		t.Run(fmt.Sprintf("tc%d_exp_%s", i, tc.expRes), func(t *testing.T) {
			fmtFormat := GetFixedLengthFloatFormat(tc.maxVal, tc.precision)
			res := fmt.Sprintf(fmtFormat, tc.val)
			assert.Equal(t, tc.expRes, res)
			back, err := strconv.ParseFloat(res, 64)
			require.NoError(t, err)

			// Parsing back should yield val rounded to `precision` decimals.
			precPow := math.Pow(10, float64(tc.precision))
			expParseVal := math.Round(tc.val*precPow) / precPow
			assert.Equal(t, expParseVal, back)
		})
	}
}

// TestGetFixedLengthDuration checks the fixed-width duration rendering
// against its max-value-driven truncation rules, and that the output parses
// back (via types.ParseExtendedDuration) to the 100ms-rounded absolute input.
func TestGetFixedLengthDuration(t *testing.T) {
	testCases := []struct {
		val, maxVal time.Duration
		expRes      string
	}{
		{0, 0, "0.0s"},
		{1 * time.Second, 0, "1.0s"},
		{9*time.Second + 940*time.Millisecond, 0, "9.9s"},
		{9*time.Second + 950*time.Millisecond, 0, "10.0s"},
		{1100 * time.Millisecond, 0, "1.1s"},
		{-1100 * time.Millisecond, 0, "1.1s"},
		{1100 * time.Millisecond, 10 * time.Second, "01.1s"},
		{1100 * time.Millisecond, 1 * time.Minute, "0m01.1s"},
		{1100 * time.Millisecond, -1 * time.Minute, "0m01.1s"},
		{-1100 * time.Millisecond, -1 * time.Minute, "0m01.1s"},
		{1100 * time.Millisecond, 10 * time.Minute, "00m01.1s"},
		{1100 * time.Millisecond, time.Hour, "0h00m01.1s"},
		{1100 * time.Millisecond, 10 * time.Hour, "00h00m01.1s"},
		{183 * time.Second, 10 * time.Minute, "03m03.0s"},
		{183 * time.Second, 120 * time.Minute, "0h03m03.0s"},
		{183 * time.Second, 10 * time.Hour, "00h03m03.0s"},
		{183 * time.Second, 25 * time.Hour, "0d00h03m03.0s"},
		{25 * time.Hour, 25 * time.Hour, "1d01h00m00.0s"},
		{482 * time.Hour, 25 * time.Hour, "20d02h00m00.0s"},
		{482 * time.Hour, 4800 * time.Hour, "020d02h00m00.0s"},
		{482*time.Hour + 671*time.Second + 65*time.Millisecond, time.Duration(math.MaxInt64), "000020d02h11m11.1s"},

		// subtracting a second since rounding doesn't work as expected at the limits of int64
		{time.Duration(math.MaxInt64) - time.Second, time.Duration(math.MaxInt64), "106751d23h47m15.9s"},
	}

	for i, tc := range testCases {
		tc := tc // capture range variable
		t.Run(fmt.Sprintf("tc%d_exp_%s", i, tc.expRes), func(t *testing.T) {
			res := GetFixedLengthDuration(tc.val, tc.maxVal)
			assert.Equal(t, tc.expRes, res)

			// The rendered string must parse back to |val| rounded to 100ms.
			expBackDur := tc.val.Round(100 * time.Millisecond)
			if expBackDur < 0 {
				expBackDur = -expBackDur
			}
			backDur, err := types.ParseExtendedDuration(res)
			assert.NoError(t, err)
			assert.Equal(t, expBackDur, backDur)
		})
	}
}
//nolint:gochecknoglobals
var (
	// colorFaint is shared: it colors both the Waiting status symbol and
	// the unfilled ("-") portion of the bar.
	colorFaint = color.New(color.Faint)
	// statusColors maps statuses to terminal colors; statuses missing from
	// the map (Running, Stopping) are rendered without color.
	statusColors = map[Status]*color.Color{
		Interrupted: color.New(color.FgRed),
		Done:        color.New(color.FgGreen),
		Waiting:     colorFaint,
	}
)

const (
	// DefaultWidth of the progress bar
	DefaultWidth = 40
	// threshold below which progress should be rendered as
	// percentages instead of filling bars
	minWidth = 8
)

// Status of the progress bar
type Status rune

// Progress bar status symbols
const (
	Running     Status = ' '
	Waiting     Status = '•'
	Stopping    Status = '↓'
	Interrupted Status = '✗'
	Done        Status = '✓'
)

// ProgressBar is a simple thread-safe progressbar implementation with
// callbacks.
type ProgressBar struct {
	mutex  sync.RWMutex // guards all fields below
	width  int
	logger *logrus.Entry
	status Status

	// Callbacks supplying the dynamic parts of the bar; any of them may be
	// nil, in which case the corresponding part is rendered empty.
	left     func() string
	progress func() (progress float64, right []string)
	hijack   func() string
}

// ProgressBarOption is used for helper functions that modify the progressbar
// parameters, either in the constructor or via the Modify() method.
type ProgressBarOption func(*ProgressBar)

// WithLeft modifies the function that returns the left progressbar value.
func WithLeft(left func() string) ProgressBarOption {
	return func(pb *ProgressBar) { pb.left = left }
}

// WithConstLeft sets the left progressbar value to the supplied const.
func WithConstLeft(left string) ProgressBarOption {
	return func(pb *ProgressBar) {
		pb.left = func() string { return left }
	}
}

// WithLogger modifies the logger instance
func WithLogger(logger *logrus.Entry) ProgressBarOption {
	return func(pb *ProgressBar) { pb.logger = logger }
}

// WithProgress modifies the progress calculation function.
func WithProgress(progress func() (float64, []string)) ProgressBarOption {
	return func(pb *ProgressBar) { pb.progress = progress }
}

// WithStatus modifies the progressbar status
func WithStatus(status Status) ProgressBarOption {
	return func(pb *ProgressBar) { pb.status = status }
}

// WithConstProgress sets the progress and right values to the supplied consts.
func WithConstProgress(progress float64, right ...string) ProgressBarOption {
	return func(pb *ProgressBar) {
		pb.progress = func() (float64, []string) { return progress, right }
	}
}

// WithHijack replaces the progressbar Render function with the argument.
func WithHijack(hijack func() string) ProgressBarOption {
	return func(pb *ProgressBar) { pb.hijack = hijack }
}

// New creates and initializes a new ProgressBar struct, calling all of the
// supplied options
func New(options ...ProgressBarOption) *ProgressBar {
	pb := &ProgressBar{
		mutex: sync.RWMutex{},
		width: DefaultWidth,
	}
	pb.Modify(options...)
	return pb
}

// Left returns the left part of the progressbar in a thread-safe way.
func (pb *ProgressBar) Left() string {
	pb.mutex.RLock()
	defer pb.mutex.RUnlock()

	// maxLen 0 disables truncation
	return pb.renderLeft(0)
}

// renderLeft renders the left part of the progressbar, replacing text
// exceeding maxLen with an ellipsis.
// Note: callers must hold pb.mutex; this method does not lock.
func (pb *ProgressBar) renderLeft(maxLen int) string {
	var left string
	if pb.left != nil {
		l := pb.left()
		if maxLen > 0 && len(l) > maxLen {
			// the "..." replaces the last 3 characters of the allowed width
			l = l[:maxLen-3] + "..."
		}
		left = l
	}
	return left
}

// Modify changes the progressbar options in a thread-safe way.
func (pb *ProgressBar) Modify(options ...ProgressBarOption) {
	pb.mutex.Lock()
	defer pb.mutex.Unlock()
	for _, option := range options {
		option(pb)
	}
}

// ProgressBarRender stores the different rendered parts of the
// progress bar UI to allow dynamic positioning and padding of
// elements in the terminal output (e.g. for responsive progress
// bars).
type ProgressBarRender struct {
	Right                                   []string
	progress, progressFill, progressPadding string
	Left, Hijack                            string
	status                                  Status
	Color                                   bool
}

// Status returns an optionally colorized status string
func (pbr *ProgressBarRender) Status() string {
	status := " "

	if pbr.status > 0 {
		status = string(pbr.status)
		if c, ok := statusColors[pbr.status]; pbr.Color && ok {
			status = c.Sprint(status)
		}
	}

	return status
}

// Progress returns an assembled and optionally colorized progress string
func (pbr *ProgressBarRender) Progress() string {
	var body string
	if pbr.progress != "" {
		// percentage mode (narrow bars)
		body = fmt.Sprintf(" %s ", pbr.progress)
	} else {
		// bar mode: "="-fill followed by faint "-"-padding
		padding := pbr.progressPadding
		if pbr.Color {
			padding = colorFaint.Sprint(pbr.progressPadding)
		}
		body = pbr.progressFill + padding
	}
	return fmt.Sprintf("[%s]", body)
}

func (pbr ProgressBarRender) String() string {
	if pbr.Hijack != "" {
		return pbr.Hijack
	}
	var right string
	if len(pbr.Right) > 0 {
		right = " " + strings.Join(pbr.Right, " ")
	}
	return pbr.Left + " " + pbr.Status() + " " + pbr.Progress() + right
}
+func (pb *ProgressBar) Render(maxLeft, widthDelta int) ProgressBarRender { + pb.mutex.RLock() + defer pb.mutex.RUnlock() + + var out ProgressBarRender + if pb.hijack != nil { + out.Hijack = pb.hijack() + return out + } + + var progress float64 + if pb.progress != nil { + progress, out.Right = pb.progress() + progressClamped := Clampf(progress, 0, 1) + if progress != progressClamped { + progress = progressClamped + if pb.logger != nil { + pb.logger.Warnf("progress value %.2f exceeds valid range, clamped between 0 and 1", progress) + } + } + } + + width := Clampf(float64(pb.width+widthDelta), minWidth, DefaultWidth) + pb.width = int(width) + + if pb.width > minWidth { //nolint:nestif + space := pb.width - 2 + filled := int(float64(space) * progress) + + filling := "" + caret := "" + if filled > 0 { + if filled < space { + filling = strings.Repeat("=", filled-1) + caret = ">" + } else { + filling = strings.Repeat("=", filled) + } + } + + out.progressPadding = "" + if space > filled { + out.progressPadding = strings.Repeat("-", space-filled) + } + + out.progressFill = filling + caret + } else { + out.progress = fmt.Sprintf("%3.f%%", progress*100) + } + + out.Left = pb.renderLeft(maxLeft) + out.status = pb.status + + return out +} diff --git a/ui/pb/progressbar_test.go b/ui/pb/progressbar_test.go new file mode 100644 index 00000000000..b9b9900da80 --- /dev/null +++ b/ui/pb/progressbar_test.go @@ -0,0 +1,139 @@ +/* + * + * k6 - a next-generation load testing tool + * Copyright (C) 2019 Load Impact + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
// TODO(imiric): Consider adding logging tests for 100% pb coverage.
// Unfortunately the following introduces an import cycle: pb -> lib -> pb
// func getTestLogger() *logger.Entry {
// 	logHook := &testutils.SimpleLogrusHook{HookedLevels: []logrus.Level{logrus.WarnLevel}}
// 	testLog := logrus.New()
// 	testLog.AddHook(logHook)
// 	testLog.SetOutput(ioutil.Discard)
// 	return logrus.NewEntry(testLog)
// }

// TestProgressBarRender checks the fully assembled bar output for various
// combinations of options, including clamping of out-of-range progress
// values, hijacking, and the narrow percentage fallback.
func TestProgressBarRender(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		options      []ProgressBarOption
		pbWidthDelta int
		expected     string
	}{
		// The three spaces between the left text and "[" are: the separator,
		// the (empty) status symbol, and the second separator.
		{[]ProgressBarOption{WithLeft(func() string { return "left" })},
			0, "left   [--------------------------------------]"},
		{[]ProgressBarOption{WithConstLeft("constLeft")},
			0, "constLeft   [--------------------------------------]"},
		{[]ProgressBarOption{
			WithLeft(func() string { return "left" }),
			WithStatus(Done),
		}, 0, "left ✓ [--------------------------------------]"},
		{[]ProgressBarOption{
			WithLeft(func() string { return "left" }),
			WithProgress(func() (float64, []string) { return 0, []string{"right"} }),
		}, 0, "left   [--------------------------------------] right"},
		{[]ProgressBarOption{
			WithLeft(func() string { return "left" }),
			WithProgress(func() (float64, []string) { return 0.5, []string{"right"} }),
		}, 0, "left   [==================>-------------------] right"},
		{[]ProgressBarOption{
			WithLeft(func() string { return "left" }),
			WithProgress(func() (float64, []string) { return 1.0, []string{"right"} }),
		}, 0, "left   [======================================] right"},
		// Out-of-range progress values are clamped to [0, 1].
		{[]ProgressBarOption{
			WithLeft(func() string { return "left" }),
			WithProgress(func() (float64, []string) { return -1, []string{"right"} }),
		}, 0, "left   [--------------------------------------] right"},
		{[]ProgressBarOption{
			WithLeft(func() string { return "left" }),
			WithProgress(func() (float64, []string) { return 2, []string{"right"} }),
		}, 0, "left   [======================================] right"},
		{[]ProgressBarOption{
			WithLeft(func() string { return "left" }),
			WithConstProgress(0.2, "constProgress"),
		}, 0, "left   [======>-------------------------------] constProgress"},
		{[]ProgressBarOption{
			WithHijack(func() string { return "progressbar hijack!" }),
		}, 0, "progressbar hijack!"},
		// Shrinking the bar to minWidth switches to percentage rendering.
		{[]ProgressBarOption{WithConstProgress(0.25, "")},
			-DefaultWidth, "   [ 25% ] "},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable
		t.Run(tc.expected, func(t *testing.T) {
			pbar := New(tc.options...)
			assert.NotNil(t, pbar)
			assert.Equal(t, tc.expected, pbar.Render(0, tc.pbWidthDelta).String())
		})
	}
}

// TestProgressBarRenderPaddingMaxLeft checks the ellipsis truncation of the
// left-side text via Render's maxLeft parameter (<=0 disables truncation).
func TestProgressBarRenderPaddingMaxLeft(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		maxLen   int
		left     string
		expected string
	}{
		{-1, "left", "left   [--------------------------------------]"},
		{0, "left", "left   [--------------------------------------]"},
		{10, "left_truncated",
			"left_tr...   [--------------------------------------]"},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable
		t.Run(tc.left, func(t *testing.T) {
			pbar := New(WithLeft(func() string { return tc.left }))
			assert.NotNil(t, pbar)
			assert.Equal(t, tc.expected, pbar.Render(tc.maxLen, 0).String())
		})
	}
}

// TestProgressBarLeft checks that Left() returns the untruncated left text,
// and the empty string when no left callback is set.
func TestProgressBarLeft(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		left     func() string
		expected string
	}{
		{nil, ""},
		{func() string { return " left " }, " left "},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable
		t.Run(tc.expected, func(t *testing.T) {
			pbar := New(WithLeft(tc.left))
			assert.NotNil(t, pbar)
			assert.Equal(t, tc.expected, pbar.Left())
		})
	}
}
- * - */ - -package ui - -import ( - "fmt" - "strings" - - "github.com/fatih/color" -) - -var ( - faint = color.New(color.Faint) -) - -type ProgressBar struct { - Width int - Progress float64 - Left, Right func() string -} - -func (b ProgressBar) String() string { - space := b.Width - 2 - filled := int(float64(space) * b.Progress) - - filling := "" - caret := "" - if filled > 0 { - if filled < space { - filling = strings.Repeat("=", filled-1) - caret = ">" - } else { - filling = strings.Repeat("=", filled) - } - } - - padding := "" - if space > filled { - padding = faint.Sprint(strings.Repeat("-", space-filled)) - } - - var left, right string - if b.Left != nil { - left = b.Left() + " " - } - if b.Right != nil { - right = " " + b.Right() - } - return fmt.Sprintf("%s[%s%s%s]%s", left, filling, caret, padding, right) -} diff --git a/ui/summary.go b/ui/summary.go index 8ff03d929c1..dc981d8cc43 100644 --- a/ui/summary.go +++ b/ui/summary.go @@ -29,10 +29,11 @@ import ( "strings" "time" - "github.com/loadimpact/k6/lib" - "github.com/loadimpact/k6/stats" "github.com/pkg/errors" "golang.org/x/text/unicode/norm" + + "github.com/loadimpact/k6/lib" + "github.com/loadimpact/k6/stats" ) const ( diff --git a/vendor/github.com/dop251/goja/compiler.go b/vendor/github.com/dop251/goja/compiler.go index 385ac0dc254..47d7fb74fcd 100644 --- a/vendor/github.com/dop251/goja/compiler.go +++ b/vendor/github.com/dop251/goja/compiler.go @@ -10,6 +10,7 @@ import ( const ( blockLoop = iota + blockLoopEnum blockTry blockBranch blockSwitch @@ -85,7 +86,7 @@ func (c *compiler) leaveBlock() { for _, item := range c.block.breaks { c.p.code[item] = jump(lbl - item) } - if c.block.typ == blockLoop { + if t := c.block.typ; t == blockLoop || t == blockLoopEnum { for _, item := range c.block.conts { c.p.code[item] = jump(c.block.cont - item) } diff --git a/vendor/github.com/dop251/goja/compiler_expr.go b/vendor/github.com/dop251/goja/compiler_expr.go index c8e9fce78ea..79f9731c34c 100644 --- 
a/vendor/github.com/dop251/goja/compiler_expr.go +++ b/vendor/github.com/dop251/goja/compiler_expr.go @@ -308,6 +308,24 @@ func (e *compiledIdentifierExpr) emitGetterOrRef() { } else { if found { e.c.emit(getVar{name: e.name, idx: idx, ref: true}) + } else { + e.c.emit(getVar1Ref(e.name)) + } + } +} + +func (e *compiledIdentifierExpr) emitGetterAndCallee() { + e.addSrcMap() + if idx, found, noDynamics := e.c.scope.lookupName(e.name); noDynamics { + if found { + e.c.emit(loadUndef) + e.c.emit(getLocal(idx)) + } else { + panic("No dynamics and not found") + } + } else { + if found { + e.c.emit(getVar{name: e.name, idx: idx, ref: true, callee: true}) } else { e.c.emit(getVar1Callee(e.name)) } @@ -1423,9 +1441,8 @@ func (e *compiledCallExpr) emitGetter(putOnStack bool) { callee.member.emitGetter(true) e.c.emit(getElemCallee) case *compiledIdentifierExpr: - e.c.emit(loadUndef) calleeName = callee.name - callee.emitGetterOrRef() + callee.emitGetterAndCallee() default: e.c.emit(loadUndef) callee.emitGetter(true) diff --git a/vendor/github.com/dop251/goja/compiler_stmt.go b/vendor/github.com/dop251/goja/compiler_stmt.go index a98ac2c44a0..2f546ecb065 100644 --- a/vendor/github.com/dop251/goja/compiler_stmt.go +++ b/vendor/github.com/dop251/goja/compiler_stmt.go @@ -140,13 +140,10 @@ func (c *compiler) compileTryStatement(v *ast.TryStatement) { code[pc] = setLocalP(remap(uint32(instr))) } } + c.p.code[start+1] = pop if catchVarIdx, exists := m[0]; exists { c.p.code[start] = setLocal(catchVarIdx) - c.p.code[start+1] = pop catchOffset-- - } else { - c.p.code[start+1] = nil - catchOffset++ } } else { c.scope.accessed = true @@ -304,7 +301,7 @@ func (c *compiler) compileForInStatement(v *ast.ForInStatement, needResult bool) func (c *compiler) compileLabeledForInStatement(v *ast.ForInStatement, needResult bool, label string) { c.block = &block{ - typ: blockLoop, + typ: blockLoopEnum, outer: c.block, label: label, needResult: needResult, @@ -421,7 +418,7 @@ func (c *compiler) 
findBranchBlock(st *ast.BranchStatement) *block { func (c *compiler) findContinueBlock(label *ast.Identifier) (block *block) { if label != nil { for b := c.block; b != nil; b = b.outer { - if b.typ == blockLoop && b.label == label.Name { + if (b.typ == blockLoop || b.typ == blockLoopEnum) && b.label == label.Name { block = b break } @@ -429,7 +426,7 @@ func (c *compiler) findContinueBlock(label *ast.Identifier) (block *block) { } else { // find the nearest loop for b := c.block; b != nil; b = b.outer { - if b.typ == blockLoop { + if b.typ == blockLoop || b.typ == blockLoopEnum { block = b break } @@ -452,7 +449,7 @@ func (c *compiler) findBreakBlock(label *ast.Identifier) (block *block) { L: for b := c.block; b != nil; b = b.outer { switch b.typ { - case blockLoop, blockSwitch: + case blockLoop, blockLoopEnum, blockSwitch: block = b break L } @@ -486,7 +483,7 @@ func (c *compiler) compileBreak(label *ast.Identifier, idx file.Idx) { c.emit(halt) case blockWith: c.emit(leaveWith) - case blockLoop, blockSwitch: + case blockLoop, blockLoopEnum, blockSwitch: block = b break L } @@ -510,7 +507,7 @@ func (c *compiler) compileContinue(label *ast.Identifier, idx file.Idx) { for b := c.block; b != nil; b = b.outer { if b.typ == blockTry { c.emit(halt) - } else if b.typ == blockLoop && b.label == label.Name { + } else if (b.typ == blockLoop || b.typ == blockLoopEnum) && b.label == label.Name { block = b break } @@ -520,7 +517,7 @@ func (c *compiler) compileContinue(label *ast.Identifier, idx file.Idx) { for b := c.block; b != nil; b = b.outer { if b.typ == blockTry { c.emit(halt) - } else if b.typ == blockLoop { + } else if b.typ == blockLoop || b.typ == blockLoopEnum { block = b break } @@ -587,10 +584,14 @@ func (c *compiler) compileIfStatement(v *ast.IfStatement, needResult bool) { c.p.code[jmp1] = jump(len(c.p.code) - jmp1) c.markBlockStart() } else { - c.p.code[jmp] = jne(len(c.p.code) - jmp) - c.markBlockStart() if needResult { + c.emit(jump(2)) + c.p.code[jmp] = 
jne(len(c.p.code) - jmp) c.emit(loadUndef) + c.markBlockStart() + } else { + c.p.code[jmp] = jne(len(c.p.code) - jmp) + c.markBlockStart() } } } @@ -603,8 +604,11 @@ func (c *compiler) compileReturnStatement(v *ast.ReturnStatement) { c.emit(loadUndef) } for b := c.block; b != nil; b = b.outer { - if b.typ == blockTry { + switch b.typ { + case blockTry: c.emit(halt) + case blockLoopEnum: + c.emit(enumPop) } } c.emit(ret) diff --git a/vendor/github.com/dop251/goja/object_goreflect.go b/vendor/github.com/dop251/goja/object_goreflect.go index 8d9ca69135c..1068e4ae287 100644 --- a/vendor/github.com/dop251/goja/object_goreflect.go +++ b/vendor/github.com/dop251/goja/object_goreflect.go @@ -2,8 +2,10 @@ package goja import ( "fmt" + "github.com/dop251/goja/parser" "go/ast" "reflect" + "strings" ) // JsonEncodable allows custom JSON encoding by JSON.stringify() @@ -23,6 +25,44 @@ type FieldNameMapper interface { MethodName(t reflect.Type, m reflect.Method) string } +type tagFieldNameMapper struct { + tagName string + uncapMethods bool +} + +func (tfm tagFieldNameMapper) FieldName(_ reflect.Type, f reflect.StructField) string { + tag := f.Tag.Get(tfm.tagName) + if idx := strings.IndexByte(tag, ','); idx != -1 { + tag = tag[:idx] + } + if parser.IsIdentifier(tag) { + return tag + } + return "" +} + +func uncapitalize(s string) string { + return strings.ToLower(s[0:1]) + s[1:] +} + +func (tfm tagFieldNameMapper) MethodName(_ reflect.Type, m reflect.Method) string { + if tfm.uncapMethods { + return uncapitalize(m.Name) + } + return m.Name +} + +type uncapFieldNameMapper struct { +} + +func (u uncapFieldNameMapper) FieldName(_ reflect.Type, f reflect.StructField) string { + return uncapitalize(f.Name) +} + +func (u uncapFieldNameMapper) MethodName(_ reflect.Type, m reflect.Method) string { + return uncapitalize(m.Name) +} + type reflectFieldInfo struct { Index []int Anonymous bool @@ -512,3 +552,20 @@ func (r *Runtime) SetFieldNameMapper(mapper FieldNameMapper) { 
r.fieldNameMapper = mapper r.typeInfoCache = nil } + +// TagFieldNameMapper returns a FieldNameMapper that uses the given tagName for struct fields and optionally +// uncapitalises (making the first letter lower case) method names. +// The common tag value syntax is supported (name[,options]), however options are ignored. +// Setting name to anything other than a valid ECMAScript identifier makes the field hidden. +func TagFieldNameMapper(tagName string, uncapMethods bool) FieldNameMapper { + return tagFieldNameMapper{ + tagName: tagName, + uncapMethods: uncapMethods, + } +} + +// UncapFieldNameMapper returns a FieldNameMapper that uncapitalises struct field and method names +// making the first letter lower case. +func UncapFieldNameMapper() FieldNameMapper { + return uncapFieldNameMapper{} +} diff --git a/vendor/github.com/dop251/goja/parser/lexer.go b/vendor/github.com/dop251/goja/parser/lexer.go index 0626b007c34..0786efaca73 100644 --- a/vendor/github.com/dop251/goja/parser/lexer.go +++ b/vendor/github.com/dop251/goja/parser/lexer.go @@ -26,6 +26,10 @@ func isDecimalDigit(chr rune) bool { return '0' <= chr && chr <= '9' } +func IsIdentifier(s string) bool { + return matchIdentifier.MatchString(s) +} + func digitValue(chr rune) int { switch { case '0' <= chr && chr <= '9': diff --git a/vendor/github.com/dop251/goja/vm.go b/vendor/github.com/dop251/goja/vm.go index 95ed61f62df..8739945ada3 100644 --- a/vendor/github.com/dop251/goja/vm.go +++ b/vendor/github.com/dop251/goja/vm.go @@ -19,7 +19,7 @@ type stash struct { values valueStack extraArgs valueStack names map[string]uint32 - obj objectImpl + obj *Object outer *stash } @@ -200,8 +200,8 @@ func (s *valueStack) expand(idx int) { func (s *stash) put(name string, v Value) bool { if s.obj != nil { - if found := s.obj.getStr(name); found != nil { - s.obj.putStr(name, v, false) + if found := s.obj.self.getStr(name); found != nil { + s.obj.self.putStr(name, v, false) return true } return false @@ -232,7 +232,7 @@ 
func (s *stash) getByIdx(idx uint32) Value { func (s *stash) getByName(name string, _ *vm) (v Value, exists bool) { if s.obj != nil { - v = s.obj.getStr(name) + v = s.obj.self.getStr(name) if v == nil { return nil, false //return valueUnresolved{r: vm.r, ref: name}, false @@ -258,7 +258,7 @@ func (s *stash) createBinding(name string) { func (s *stash) deleteBinding(name string) bool { if s.obj != nil { - return s.obj.deleteStr(name, false) + return s.obj.self.deleteStr(name, false) } if idx, found := s.names[name]; found { s.values[idx] = nil @@ -1332,9 +1332,9 @@ func (s resolveVar1) exec(vm *vm) { var ref ref for stash := vm.stash; stash != nil; stash = stash.outer { if stash.obj != nil { - if stash.obj.hasPropertyStr(name) { + if stash.obj.self.hasPropertyStr(name) { ref = &objRef{ - base: stash.obj, + base: stash.obj.self, name: name, } goto end @@ -1366,8 +1366,8 @@ func (d deleteVar) exec(vm *vm) { ret := true for stash := vm.stash; stash != nil; stash = stash.outer { if stash.obj != nil { - if stash.obj.hasPropertyStr(name) { - ret = stash.obj.deleteStr(name, false) + if stash.obj.self.hasPropertyStr(name) { + ret = stash.obj.self.deleteStr(name, false) goto end } } else { @@ -1416,9 +1416,9 @@ func (s resolveVar1Strict) exec(vm *vm) { var ref ref for stash := vm.stash; stash != nil; stash = stash.outer { if stash.obj != nil { - if stash.obj.hasPropertyStr(name) { + if stash.obj.self.hasPropertyStr(name) { ref = &objRef{ - base: stash.obj, + base: stash.obj.self, name: name, strict: true, } @@ -1492,23 +1492,33 @@ func (g getLocal) exec(vm *vm) { } type getVar struct { - name string - idx uint32 - ref bool + name string + idx uint32 + ref, callee bool } func (g getVar) exec(vm *vm) { level := int(g.idx >> 24) - idx := uint32(g.idx & 0x00FFFFFF) + idx := g.idx & 0x00FFFFFF stash := vm.stash name := g.name for i := 0; i < level; i++ { if v, found := stash.getByName(name, vm); found { + if g.callee { + if stash.obj != nil { + vm.push(stash.obj) + } else { + 
vm.push(_undefined) + } + } vm.push(v) goto end } stash = stash.outer } + if g.callee { + vm.push(_undefined) + } if stash != nil { vm.push(stash.getByIdx(idx)) } else { @@ -1539,9 +1549,9 @@ func (r resolveVar) exec(vm *vm) { var ref ref for i := 0; i < level; i++ { if stash.obj != nil { - if stash.obj.hasPropertyStr(r.name) { + if stash.obj.self.hasPropertyStr(r.name) { ref = &objRef{ - base: stash.obj, + base: stash.obj.self, name: r.name, strict: r.strict, } @@ -1632,13 +1642,36 @@ func (n getVar1) exec(vm *vm) { vm.pc++ } +type getVar1Ref string + +func (n getVar1Ref) exec(vm *vm) { + name := string(n) + var val Value + for stash := vm.stash; stash != nil; stash = stash.outer { + if v, exists := stash.getByName(name, vm); exists { + val = v + break + } + } + if val == nil { + val = vm.r.globalObject.self.getStr(name) + if val == nil { + val = valueUnresolved{r: vm.r, ref: name} + } + } + vm.push(val) + vm.pc++ +} + type getVar1Callee string func (n getVar1Callee) exec(vm *vm) { name := string(n) var val Value + var callee *Object for stash := vm.stash; stash != nil; stash = stash.outer { if v, exists := stash.getByName(name, vm); exists { + callee = stash.obj val = v break } @@ -1649,6 +1682,11 @@ func (n getVar1Callee) exec(vm *vm) { val = valueUnresolved{r: vm.r, ref: name} } } + if callee != nil { + vm.push(callee) + } else { + vm.push(_undefined) + } vm.push(val) vm.pc++ } @@ -2376,7 +2414,7 @@ var enterWith _enterWith func (_enterWith) exec(vm *vm) { vm.newStash() - vm.stash.obj = vm.stack[vm.sp-1].ToObject(vm.r).self + vm.stash.obj = vm.stack[vm.sp-1].ToObject(vm.r) vm.sp-- vm.pc++ } diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS index b003eca0ca1..1931f400682 100644 --- a/vendor/github.com/gorilla/websocket/AUTHORS +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -4,5 +4,6 @@ # Please keep the list sorted. 
Gary Burd +Google LLC (https://opensource.google.com/) Joachim Bauch diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go index 43a87c753bf..962c06a391c 100644 --- a/vendor/github.com/gorilla/websocket/client.go +++ b/vendor/github.com/gorilla/websocket/client.go @@ -5,15 +5,15 @@ package websocket import ( - "bufio" "bytes" + "context" "crypto/tls" - "encoding/base64" "errors" "io" "io/ioutil" "net" "net/http" + "net/http/httptrace" "net/url" "strings" "time" @@ -53,6 +53,10 @@ type Dialer struct { // NetDial is nil, net.Dial is used. NetDial func(network, addr string) (net.Conn, error) + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, net.DialContext is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + // Proxy specifies a function to return a proxy for a given // Request. If the function returns a non-nil error, the // request is aborted with the provided error. @@ -66,11 +70,22 @@ type Dialer struct { // HandshakeTimeout specifies the duration for the handshake to complete. HandshakeTimeout time.Duration - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer // size is zero, then a useful default size is used. The I/O buffer sizes // do not limit the size of the messages that can be sent or received. ReadBufferSize, WriteBufferSize int + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. 
+ WriteBufferPool BufferPool + // Subprotocols specifies the client's requested subprotocols. Subprotocols []string @@ -86,52 +101,13 @@ type Dialer struct { Jar http.CookieJar } -var errMalformedURL = errors.New("malformed ws or wss URL") - -// parseURL parses the URL. -// -// This function is a replacement for the standard library url.Parse function. -// In Go 1.4 and earlier, url.Parse loses information from the path. -func parseURL(s string) (*url.URL, error) { - // From the RFC: - // - // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] - // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] - var u url.URL - switch { - case strings.HasPrefix(s, "ws://"): - u.Scheme = "ws" - s = s[len("ws://"):] - case strings.HasPrefix(s, "wss://"): - u.Scheme = "wss" - s = s[len("wss://"):] - default: - return nil, errMalformedURL - } - - if i := strings.Index(s, "?"); i >= 0 { - u.RawQuery = s[i+1:] - s = s[:i] - } - - if i := strings.Index(s, "/"); i >= 0 { - u.Opaque = s[i:] - s = s[:i] - } else { - u.Opaque = "/" - } - - u.Host = s - - if strings.Contains(u.Host, "@") { - // Don't bother parsing user information because user information is - // not allowed in websocket URIs. - return nil, errMalformedURL - } - - return &u, nil +// Dial creates a new client connection by calling DialContext with a background context. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) } +var errMalformedURL = errors.New("malformed ws or wss URL") + func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { hostPort = u.Host hostNoPort = u.Host @@ -150,26 +126,29 @@ func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { return hostPort, hostNoPort } -// DefaultDialer is a dialer with all fields set to the default zero values. +// DefaultDialer is a dialer with all fields set to the default values. 
var DefaultDialer = &Dialer{ - Proxy: http.ProxyFromEnvironment, + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, } -// Dial creates a new client connection. Use requestHeader to specify the +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. Use requestHeader to specify the // origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). // Use the response.Header to get the selected subprotocol // (Sec-WebSocket-Protocol) and cookies (Set-Cookie). // +// The context will be used in the request and in the Dialer. +// // If the WebSocket handshake fails, ErrBadHandshake is returned along with a // non-nil *http.Response so that callers can handle redirects, authentication, // etcetera. The response body may not contain the entire response and does not // need to be closed by the application. -func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - +func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { if d == nil { - d = &Dialer{ - Proxy: http.ProxyFromEnvironment, - } + d = &nilDialer } challengeKey, err := generateChallengeKey() @@ -177,7 +156,7 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re return nil, nil, err } - u, err := parseURL(urlStr) + u, err := url.Parse(urlStr) if err != nil { return nil, nil, err } @@ -205,6 +184,7 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re Header: make(http.Header), Host: u.Host, } + req = req.WithContext(ctx) // Set the cookies present in the cookie jar of the dialer if d.Jar != nil { @@ -237,45 +217,83 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re k == "Sec-Websocket-Extensions" || (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): return nil, nil, errors.New("websocket: 
duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs default: req.Header[k] = vs } } if d.EnableCompression { - req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover") + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} } - hostPort, hostNoPort := hostPortNoPort(u) - - var proxyURL *url.URL - // Check wether the proxy method has been configured - if d.Proxy != nil { - proxyURL, err = d.Proxy(req) - } - if err != nil { - return nil, nil, err + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() } - var targetHostPort string - if proxyURL != nil { - targetHostPort, _ = hostPortNoPort(proxyURL) + // Get network dial function. + var netDial func(network, add string) (net.Conn, error) + + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial } else { - targetHostPort = hostPort + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } } - var deadline time.Time - if d.HandshakeTimeout != 0 { - deadline = time.Now().Add(d.HandshakeTimeout) + // If needed, wrap the dial function to set the connection deadline. 
+ if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + c.Close() + return nil, err + } + return c, nil + } } - netDial := d.NetDial - if netDial == nil { - netDialer := &net.Dialer{Deadline: deadline} - netDial = netDialer.Dial + // If needed, wrap the dial function to connect through a proxy. + if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) } - netConn, err := netDial("tcp", targetHostPort) + netConn, err := netDial("tcp", hostPort) + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } if err != nil { return nil, nil, err } @@ -286,42 +304,6 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re } }() - if err := netConn.SetDeadline(deadline); err != nil { - return nil, nil, err - } - - if proxyURL != nil { - connectHeader := make(http.Header) - if user := proxyURL.User; user != nil { - proxyUser := user.Username() - if proxyPassword, passwordSet := user.Password(); passwordSet { - credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) - connectHeader.Set("Proxy-Authorization", "Basic "+credential) - } - } - connectReq := &http.Request{ - Method: "CONNECT", - URL: &url.URL{Opaque: hostPort}, - Host: hostPort, - Header: connectHeader, - } - - connectReq.Write(netConn) - - // Read response. 
- // Okay to use and discard buffered reader here, because - // TLS server will not speak until spoken to. - br := bufio.NewReader(netConn) - resp, err := http.ReadResponse(br, connectReq) - if err != nil { - return nil, nil, err - } - if resp.StatusCode != 200 { - f := strings.SplitN(resp.Status, " ", 2) - return nil, nil, errors.New(f[1]) - } - } - if u.Scheme == "https" { cfg := cloneTLSConfig(d.TLSClientConfig) if cfg.ServerName == "" { @@ -329,22 +311,31 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re } tlsConn := tls.Client(netConn, cfg) netConn = tlsConn - if err := tlsConn.Handshake(); err != nil { - return nil, nil, err + + var err error + if trace != nil { + err = doHandshakeWithTrace(trace, tlsConn, cfg) + } else { + err = doHandshake(tlsConn, cfg) } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return nil, nil, err - } + + if err != nil { + return nil, nil, err } } - conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize) + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) if err := req.Write(netConn); err != nil { return nil, nil, err } + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + resp, err := http.ReadResponse(conn.br, req) if err != nil { return nil, nil, err @@ -390,3 +381,15 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re netConn = nil // to avoid close in defer. 
return conn, resp, nil } + +func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index 97e1dbacb12..ca46d2f793c 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -76,7 +76,7 @@ const ( // is UTF-8 encoded text. PingMessage = 9 - // PongMessage denotes a ping control message. The optional message payload + // PongMessage denotes a pong control message. The optional message payload // is UTF-8 encoded text. PongMessage = 10 ) @@ -100,9 +100,8 @@ func (e *netError) Error() string { return e.msg } func (e *netError) Temporary() bool { return e.temporary } func (e *netError) Timeout() bool { return e.timeout } -// CloseError represents close frame. +// CloseError represents a close message. type CloseError struct { - // Code is defined in RFC 6455, section 11.7. Code int @@ -224,6 +223,20 @@ func isValidReceivedCloseCode(code int) bool { return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) } +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + // The Conn type represents a WebSocket connection. 
type Conn struct { conn net.Conn @@ -231,8 +244,10 @@ type Conn struct { subprotocol string // Write fields - mu chan bool // used as mutex to protect write to conn - writeBuf []byte // frame is constructed in this buffer. + mu chan struct{} // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. + writePool BufferPool + writeBufSize int writeDeadline time.Time writer io.WriteCloser // the current writer returned to the application isWriting bool // for best-effort concurrent write detection @@ -245,10 +260,12 @@ type Conn struct { newCompressionWriter func(io.WriteCloser, int) io.WriteCloser // Read fields - reader io.ReadCloser // the current reader returned to the application - readErr error - br *bufio.Reader - readRemaining int64 // bytes remaining in current frame. + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + // bytes remaining in current frame. + // set setReadRemaining to safely update this value and prevent overflow + readRemaining int64 readFinal bool // true the current message has more frames. readLength int64 // Message size. readLimit int64 // Maximum message size. 
@@ -264,64 +281,29 @@ type Conn struct { newDecompressionReader func(io.Reader) io.ReadCloser } -func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn { - return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil) -} - -type writeHook struct { - p []byte -} - -func (wh *writeHook) Write(p []byte) (int, error) { - wh.p = p - return len(p), nil -} +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { -func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn { - mu := make(chan bool, 1) - mu <- true - - var br *bufio.Reader - if readBufferSize == 0 && brw != nil && brw.Reader != nil { - // Reuse the supplied bufio.Reader if the buffer has a useful size. - // This code assumes that peek on a reader returns - // bufio.Reader.buf[:0]. - brw.Reader.Reset(conn) - if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 { - br = brw.Reader - } - } if br == nil { if readBufferSize == 0 { readBufferSize = defaultReadBufferSize - } - if readBufferSize < maxControlFramePayloadSize { + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame readBufferSize = maxControlFramePayloadSize } br = bufio.NewReaderSize(conn, readBufferSize) } - var writeBuf []byte - if writeBufferSize == 0 && brw != nil && brw.Writer != nil { - // Use the bufio.Writer's buffer if the buffer has a useful size. This - // code assumes that bufio.Writer.buf[:1] is passed to the - // bufio.Writer's underlying writer. 
- var wh writeHook - brw.Writer.Reset(&wh) - brw.Writer.WriteByte(0) - brw.Flush() - if cap(wh.p) >= maxFrameHeaderSize+256 { - writeBuf = wh.p[:cap(wh.p)] - } + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize } + writeBufferSize += maxFrameHeaderSize - if writeBuf == nil { - if writeBufferSize == 0 { - writeBufferSize = defaultWriteBufferSize - } - writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize) + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) } + mu := make(chan struct{}, 1) + mu <- struct{}{} c := &Conn{ isServer: isServer, br: br, @@ -329,6 +311,8 @@ func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize in mu: mu, readFinal: true, writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: writeBufferSize, enableWriteCompression: true, compressionLevel: defaultCompressionLevel, } @@ -338,12 +322,24 @@ func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize in return c } +// setReadRemaining tracks the number of bytes remaining on the connection. If n +// overflows, an ErrReadLimit is returned. +func (c *Conn) setReadRemaining(n int64) error { + if n < 0 { + return ErrReadLimit + } + + c.readRemaining = n + return nil +} + // Subprotocol returns the negotiated protocol for the connection. func (c *Conn) Subprotocol() string { return c.subprotocol } -// Close closes the underlying network connection without sending or waiting for a close frame. +// Close closes the underlying network connection without sending or waiting +// for a close message. 
func (c *Conn) Close() error { return c.conn.Close() } @@ -370,9 +366,18 @@ func (c *Conn) writeFatal(err error) error { return err } -func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { <-c.mu - defer func() { c.mu <- true }() + defer func() { c.mu <- struct{}{} }() c.writeErrMu.Lock() err := c.writeErr @@ -382,15 +387,14 @@ func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { } c.conn.SetWriteDeadline(deadline) - for _, buf := range bufs { - if len(buf) > 0 { - _, err := c.conn.Write(buf) - if err != nil { - return c.writeFatal(err) - } - } + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) } - if frameType == CloseMessage { c.writeFatal(ErrCloseSent) } @@ -425,7 +429,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er maskBytes(key, 0, buf[6:]) } - d := time.Hour * 1000 + d := 1000 * time.Hour if !deadline.IsZero() { d = deadline.Sub(time.Now()) if d < 0 { @@ -440,7 +444,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er case <-timer.C: return errWriteTimeout } - defer func() { c.mu <- true }() + defer func() { c.mu <- struct{}{} }() c.writeErrMu.Lock() err := c.writeErr @@ -460,7 +464,8 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er return err } -func (c *Conn) prepWrite(messageType int) error { +// beginMessage prepares a connection and message writer for a new message. +func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { // Close previous writer if not already closed by the application. 
It's // probably better to return an error in this situation, but we cannot // change this without breaking existing applications. @@ -476,7 +481,23 @@ func (c *Conn) prepWrite(messageType int) error { c.writeErrMu.Lock() err := c.writeErr c.writeErrMu.Unlock() - return err + if err != nil { + return err + } + + mw.c = c + mw.frameType = messageType + mw.pos = maxFrameHeaderSize + + if c.writeBuf == nil { + wpd, ok := c.writePool.Get().(writePoolData) + if ok { + c.writeBuf = wpd.buf + } else { + c.writeBuf = make([]byte, c.writeBufSize) + } + } + return nil } // NextWriter returns a writer for the next message to send. The writer's Close @@ -484,17 +505,15 @@ func (c *Conn) prepWrite(messageType int) error { // // There can be at most one open writer on a connection. NextWriter closes the // previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - if err := c.prepWrite(messageType); err != nil { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { return nil, err } - - mw := &messageWriter{ - c: c, - frameType: messageType, - pos: maxFrameHeaderSize, - } - c.writer = mw + c.writer = &mw if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { w := c.newCompressionWriter(c.writer, c.compressionLevel) mw.compress = true @@ -511,10 +530,16 @@ type messageWriter struct { err error } -func (w *messageWriter) fatal(err error) error { +func (w *messageWriter) endMessage(err error) error { if w.err != nil { - w.err = err - w.c.writer = nil + return err + } + c := w.c + w.err = err + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil } return err } @@ -528,7 +553,7 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error { // Check for invalid control 
frames. if isControl(w.frameType) && (!final || length > maxControlFramePayloadSize) { - return w.fatal(errInvalidControlFrame) + return w.endMessage(errInvalidControlFrame) } b0 := byte(w.frameType) @@ -573,7 +598,7 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error { copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) if len(extra) > 0 { - return c.writeFatal(errors.New("websocket: internal error, extra used in client mode")) + return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) } } @@ -594,11 +619,11 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error { c.isWriting = false if err != nil { - return w.fatal(err) + return w.endMessage(err) } if final { - c.writer = nil + w.endMessage(errWriteClosed) return nil } @@ -696,11 +721,7 @@ func (w *messageWriter) Close() error { if w.err != nil { return w.err } - if err := w.flushFrame(true, nil); err != nil { - return err - } - w.err = errWriteClosed - return nil + return w.flushFrame(true, nil) } // WritePreparedMessage writes prepared message into connection. @@ -732,10 +753,10 @@ func (c *Conn) WriteMessage(messageType int, data []byte) error { if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { // Fast path with no allocations and single frame. - if err := c.prepWrite(messageType); err != nil { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { return err } - mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} n := copy(c.writeBuf[mw.pos:], data) mw.pos += n data = data[n:] @@ -764,7 +785,6 @@ func (c *Conn) SetWriteDeadline(t time.Time) error { // Read methods func (c *Conn) advanceFrame() (int, error) { - // 1. Skip remainder of previous frame. 
if c.readRemaining > 0 { @@ -783,7 +803,7 @@ func (c *Conn) advanceFrame() (int, error) { final := p[0]&finalBit != 0 frameType := int(p[0] & 0xf) mask := p[1]&maskBit != 0 - c.readRemaining = int64(p[1] & 0x7f) + c.setReadRemaining(int64(p[1] & 0x7f)) c.readDecompress = false if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { @@ -817,7 +837,17 @@ func (c *Conn) advanceFrame() (int, error) { return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) } - // 3. Read and parse frame length. + // 3. Read and parse frame length as per + // https://tools.ietf.org/html/rfc6455#section-5.2 + // + // The length of the "Payload data", in bytes: if 0-125, that is the payload + // length. + // - If 126, the following 2 bytes interpreted as a 16-bit unsigned + // integer are the payload length. + // - If 127, the following 8 bytes interpreted as + // a 64-bit unsigned integer (the most significant bit MUST be 0) are the + // payload length. Multibyte length quantities are expressed in network byte + // order. switch c.readRemaining { case 126: @@ -825,13 +855,19 @@ func (c *Conn) advanceFrame() (int, error) { if err != nil { return noFrame, err } - c.readRemaining = int64(binary.BigEndian.Uint16(p)) + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { + return noFrame, err + } case 127: p, err := c.read(8) if err != nil { return noFrame, err } - c.readRemaining = int64(binary.BigEndian.Uint64(p)) + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { + return noFrame, err + } } // 4. Handle frame masking. @@ -854,6 +890,12 @@ func (c *Conn) advanceFrame() (int, error) { if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { c.readLength += c.readRemaining + // Don't allow readLength to overflow in the presence of a large readRemaining + // counter. 
+ if c.readLength < 0 { + return noFrame, ErrReadLimit + } + if c.readLimit > 0 && c.readLength > c.readLimit { c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) return noFrame, ErrReadLimit @@ -867,7 +909,7 @@ func (c *Conn) advanceFrame() (int, error) { var payload []byte if c.readRemaining > 0 { payload, err = c.read(int(c.readRemaining)) - c.readRemaining = 0 + c.setReadRemaining(0) if err != nil { return noFrame, err } @@ -940,6 +982,7 @@ func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { c.readErr = hideTempErr(err) break } + if frameType == TextMessage || frameType == BinaryMessage { c.messageReader = &messageReader{c} c.reader = c.messageReader @@ -980,7 +1023,9 @@ func (r *messageReader) Read(b []byte) (int, error) { if c.isServer { c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) } - c.readRemaining -= int64(n) + rem := c.readRemaining + rem -= int64(n) + c.setReadRemaining(rem) if c.readRemaining > 0 && c.readErr == io.EOF { c.readErr = errUnexpectedEOF } @@ -1032,8 +1077,8 @@ func (c *Conn) SetReadDeadline(t time.Time) error { return c.conn.SetReadDeadline(t) } -// SetReadLimit sets the maximum size for a message read from the peer. If a -// message exceeds the limit, the connection sends a close frame to the peer +// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer // and returns ErrReadLimit to the application. func (c *Conn) SetReadLimit(limit int64) { c.readLimit = limit @@ -1046,24 +1091,22 @@ func (c *Conn) CloseHandler() func(code int, text string) error { // SetCloseHandler sets the handler for close messages received from the peer. // The code argument to h is the received close code or CloseNoStatusReceived -// if the close message is empty. The default close handler sends a close frame -// back to the peer. +// if the close message is empty. 
The default close handler sends a close +// message back to the peer. // -// The application must read the connection to process close messages as -// described in the section on Control Frames above. +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. // -// The connection read methods return a CloseError when a close frame is +// The connection read methods return a CloseError when a close message is // received. Most applications should handle close messages as part of their // normal error handling. Applications should only set a close handler when the -// application must perform some action before sending a close frame back to +// application must perform some action before sending a close message back to // the peer. func (c *Conn) SetCloseHandler(h func(code int, text string) error) { if h == nil { h = func(code int, text string) error { - message := []byte{} - if code != CloseNoStatusReceived { - message = FormatCloseMessage(code, "") - } + message := FormatCloseMessage(code, "") c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) return nil } @@ -1077,11 +1120,12 @@ func (c *Conn) PingHandler() func(appData string) error { } // SetPingHandler sets the handler for ping messages received from the peer. -// The appData argument to h is the PING frame application data. The default +// The appData argument to h is the PING message application data. The default // ping handler sends a pong to the peer. // -// The application must read the connection to process ping messages as -// described in the section on Control Frames above. +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. 
func (c *Conn) SetPingHandler(h func(appData string) error) { if h == nil { h = func(message string) error { @@ -1103,11 +1147,12 @@ func (c *Conn) PongHandler() func(appData string) error { } // SetPongHandler sets the handler for pong messages received from the peer. -// The appData argument to h is the PONG frame application data. The default +// The appData argument to h is the PONG message application data. The default // pong handler does nothing. // -// The application must read the connection to process ping messages as -// described in the section on Control Frames above. +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. func (c *Conn) SetPongHandler(h func(appData string) error) { if h == nil { h = func(string) error { return nil } @@ -1141,7 +1186,14 @@ func (c *Conn) SetCompressionLevel(level int) error { } // FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. + return []byte{} + } buf := make([]byte, 2+len(text)) binary.BigEndian.PutUint16(buf, uint16(closeCode)) copy(buf[2:], text) diff --git a/vendor/github.com/gorilla/websocket/conn_read_legacy.go b/vendor/github.com/gorilla/websocket/conn_read_legacy.go deleted file mode 100644 index 018541cf6cb..00000000000 --- a/vendor/github.com/gorilla/websocket/conn_read_legacy.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.5 - -package websocket - -import "io" - -func (c *Conn) read(n int) ([]byte, error) { - p, err := c.br.Peek(n) - if err == io.EOF { - err = errUnexpectedEOF - } - if len(p) > 0 { - // advance over the bytes just read - io.ReadFull(c.br, p) - } - return p, err -} diff --git a/vendor/github.com/gorilla/websocket/conn_read.go b/vendor/github.com/gorilla/websocket/conn_write.go similarity index 52% rename from vendor/github.com/gorilla/websocket/conn_read.go rename to vendor/github.com/gorilla/websocket/conn_write.go index 1ea15059ee1..a509a21f87a 100644 --- a/vendor/github.com/gorilla/websocket/conn_read.go +++ b/vendor/github.com/gorilla/websocket/conn_write.go @@ -2,17 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.5 +// +build go1.8 package websocket -import "io" +import "net" -func (c *Conn) read(n int) ([]byte, error) { - p, err := c.br.Peek(n) - if err == io.EOF { - err = errUnexpectedEOF - } - c.br.Discard(len(p)) - return p, err +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err } diff --git a/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/vendor/github.com/gorilla/websocket/conn_write_legacy.go new file mode 100644 index 00000000000..37edaff5a57 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_write_legacy.go @@ -0,0 +1,18 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.8 + +package websocket + +func (c *Conn) writeBufs(bufs ...[]byte) error { + for _, buf := range bufs { + if len(buf) > 0 { + if _, err := c.conn.Write(buf); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go index e291a952c1e..8db0cef95a2 100644 --- a/vendor/github.com/gorilla/websocket/doc.go +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -6,9 +6,8 @@ // // Overview // -// The Conn type represents a WebSocket connection. A server application uses -// the Upgrade function from an Upgrader object with a HTTP request handler -// to get a pointer to a Conn: +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: // // var upgrader = websocket.Upgrader{ // ReadBufferSize: 1024, @@ -31,10 +30,12 @@ // for { // messageType, p, err := conn.ReadMessage() // if err != nil { +// log.Println(err) // return // } -// if err = conn.WriteMessage(messageType, p); err != nil { -// return err +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return // } // } // @@ -85,20 +86,26 @@ // and pong. Call the connection WriteControl, WriteMessage or NextWriter // methods to send a control message to the peer. // -// Connections handle received close messages by sending a close message to the -// peer and returning a *CloseError from the the NextReader, ReadMessage or the -// message Read method. +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. // -// Connections handle received ping and pong messages by invoking callback -// functions set with SetPingHandler and SetPongHandler methods. 
The callback -// functions are called from the NextReader, ReadMessage and the message Read -// methods. +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. The default ping handler sends a pong +// message to the peer. +// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. // -// The default ping handler sends a pong to the peer. The application's reading -// goroutine can block for a short time while the handler writes the pong data -// to the connection. +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. // -// The application must read the connection to process ping, pong and close +// The application must read the connection to process close, ping and pong // messages sent from the peer. If the application is not otherwise interested // in messages from the peer, then the application should start a goroutine to // read and discard messages from the peer. A simple example is: @@ -137,19 +144,59 @@ // method fails the WebSocket handshake with HTTP status 403. // // If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail -// the handshake if the Origin request header is present and not equal to the -// Host request header. -// -// An application can allow connections from any origin by specifying a -// function that always returns true: -// -// var upgrader = websocket.Upgrader{ -// CheckOrigin: func(r *http.Request) bool { return true }, -// } -// -// The deprecated Upgrade function does not enforce an origin policy. 
It's the -// application's responsibility to check the Origin header before calling -// Upgrade. +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Buffers +// +// Connections buffer network input and output to reduce the number +// of system calls when reading or writing messages. +// +// Write buffers are also used for constructing WebSocket frames. See RFC 6455, +// Section 5 for a discussion of message framing. A WebSocket frame header is +// written to the network each time a write buffer is flushed to the network. +// Decreasing the size of the write buffer can increase the amount of framing +// overhead on the connection. +// +// The buffer sizes in bytes are specified by the ReadBufferSize and +// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default +// size of 4096 when a buffer size field is set to zero. The Upgrader reuses +// buffers created by the HTTP server when a buffer size field is set to zero. +// The HTTP server buffers have a size of 4096 at the time of this writing. +// +// The buffer sizes do not limit the size of a message that can be read or +// written by a connection. +// +// Buffers are held for the lifetime of the connection by default. If the +// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the +// write buffer only when writing a message. +// +// Applications should tune the buffer sizes to balance memory use and +// performance. Increasing the buffer size uses more memory, but can reduce the +// number of system calls to read or write the network. In the case of writing, +// increasing the buffer size can reduce the number of frame headers written to +// the network. 
+// +// Some guidelines for setting buffer parameters are: +// +// Limit the buffer sizes to the maximum expected message size. Buffers larger +// than the largest message do not provide any benefit. +// +// Depending on the distribution of message sizes, setting the buffer size to +// a value less than the maximum expected message size can greatly reduce memory +// use with a small impact on performance. Here's an example: If 99% of the +// messages are smaller than 256 bytes and the maximum message size is 512 +// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls +// than a buffer size of 512 bytes. The memory savings is 50%. +// +// A write buffer pool is useful when the application has a modest number +// writes over a large number of connections. when buffers are pooled, a larger +// buffer size has a reduced impact on total memory use and has the benefit of +// reducing system calls and frame overhead. // // Compression EXPERIMENTAL // diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go new file mode 100644 index 00000000000..c64f8c82901 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/join.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "io" + "strings" +) + +// JoinMessages concatenates received messages to create a single io.Reader. +// The string term is appended to each message. The returned reader does not +// support concurrent calls to the Read method. 
+func JoinMessages(c *Conn, term string) io.Reader { + return &joinReader{c: c, term: term} +} + +type joinReader struct { + c *Conn + term string + r io.Reader +} + +func (r *joinReader) Read(p []byte) (int, error) { + if r.r == nil { + var err error + _, r.r, err = r.c.NextReader() + if err != nil { + return 0, err + } + if r.term != "" { + r.r = io.MultiReader(r.r, strings.NewReader(r.term)) + } + } + n, err := r.r.Read(p) + if err == io.EOF { + err = nil + r.r = nil + } + return n, err +} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go index 4f0e36875a5..dc2c1f6415f 100644 --- a/vendor/github.com/gorilla/websocket/json.go +++ b/vendor/github.com/gorilla/websocket/json.go @@ -9,12 +9,14 @@ import ( "io" ) -// WriteJSON is deprecated, use c.WriteJSON instead. +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. func WriteJSON(c *Conn, v interface{}) error { return c.WriteJSON(v) } -// WriteJSON writes the JSON encoding of v to the connection. +// WriteJSON writes the JSON encoding of v as a message. // // See the documentation for encoding/json Marshal for details about the // conversion of Go values to JSON. @@ -31,7 +33,10 @@ func (c *Conn) WriteJSON(v interface{}) error { return err2 } -// ReadJSON is deprecated, use c.ReadJSON instead. +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. 
func ReadJSON(c *Conn, v interface{}) error { return c.ReadJSON(v) } diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go index 6a88bbc7434..577fce9efd7 100644 --- a/vendor/github.com/gorilla/websocket/mask.go +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -11,7 +11,6 @@ import "unsafe" const wordSize = int(unsafe.Sizeof(uintptr(0))) func maskBytes(key [4]byte, pos int, b []byte) int { - // Mask one byte at a time for small buffers. if len(b) < 2*wordSize { for i := range b { diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go index 1efffbd1ebe..c854225e967 100644 --- a/vendor/github.com/gorilla/websocket/prepared.go +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -19,7 +19,6 @@ import ( type PreparedMessage struct { messageType int data []byte - err error mu sync.Mutex frames map[prepareKey]*preparedFrame } @@ -74,8 +73,8 @@ func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { // Prepare a frame using a 'fake' connection. // TODO: Refactor code in conn.go to allow more direct construction of // the frame. - mu := make(chan bool, 1) - mu <- true + mu := make(chan struct{}, 1) + mu <- struct{}{} var nc prepareConn c := &Conn{ conn: &nc, diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go new file mode 100644 index 00000000000..e87a8c9f0c9 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "encoding/base64" + "errors" + "net" + "net/http" + "net/url" + "strings" +) + +type netDialerFunc func(network, addr string) (net.Conn, error) + +func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { + return fn(network, addr) +} + +func init() { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil + }) +} + +type httpProxyDialer struct { + proxyURL *url.URL + forwardDial func(network, addr string) (net.Conn, error) +} + +func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { + hostPort, _ := hostPortNoPort(hpd.proxyURL) + conn, err := hpd.forwardDial(network, hostPort) + if err != nil { + return nil, err + } + + connectHeader := make(http.Header) + if user := hpd.proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: connectHeader, + } + + if err := connectReq.Write(conn); err != nil { + conn.Close() + return nil, err + } + + // Read response. It's OK to use and discard buffered reader here becaue + // the remote server does not speak until spoken to. 
+ br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + + if resp.StatusCode != 200 { + conn.Close() + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go index 3495e0f1abc..887d558918c 100644 --- a/vendor/github.com/gorilla/websocket/server.go +++ b/vendor/github.com/gorilla/websocket/server.go @@ -7,7 +7,7 @@ package websocket import ( "bufio" "errors" - "net" + "io" "net/http" "net/url" "strings" @@ -27,16 +27,29 @@ type Upgrader struct { // HandshakeTimeout specifies the duration for the handshake to complete. HandshakeTimeout time.Duration - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer // size is zero, then buffers allocated by the HTTP server are used. The // I/O buffer sizes do not limit the size of the messages that can be sent // or received. ReadBufferSize, WriteBufferSize int + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + // Subprotocols specifies the server's supported protocols in order of - // preference. If this field is set, then the Upgrade method negotiates a + // preference. If this field is not nil, then the Upgrade method negotiates a // subprotocol by selecting the first match in this list with a protocol - // requested by the client. + // requested by the client. 
If there's no match, then no protocol is + // negotiated (the Sec-Websocket-Protocol header is not included in the + // handshake response). Subprotocols []string // Error specifies the function for generating HTTP error responses. If Error @@ -44,8 +57,12 @@ type Upgrader struct { Error func(w http.ResponseWriter, r *http.Request, status int, reason error) // CheckOrigin returns true if the request Origin header is acceptable. If - // CheckOrigin is nil, the host in the Origin header must not be set or - // must match the host of the request. + // CheckOrigin is nil, then a safe default is used: return false if the + // Origin request header is present and the origin host is not equal to + // request Host header. + // + // A CheckOrigin function should carefully validate the request origin to + // prevent cross-site request forgery. CheckOrigin func(r *http.Request) bool // EnableCompression specify if the server should attempt to negotiate per @@ -76,7 +93,7 @@ func checkSameOrigin(r *http.Request) bool { if err != nil { return false } - return u.Host == r.Host + return equalASCIIFold(u.Host, r.Host) } func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { @@ -99,42 +116,44 @@ func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header // // The responseHeader is included in the response to the client's upgrade // request. Use the responseHeader to specify cookies (Set-Cookie) and the -// application negotiated subprotocol (Sec-Websocket-Protocol). +// application negotiated subprotocol (Sec-WebSocket-Protocol). // // If the upgrade fails, then Upgrade replies to the client with an HTTP error // response. 
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { - if r.Method != "GET" { - return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET") - } - - if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported") - } + const badHandshake = "websocket: the client is not using the websocket protocol: " if !tokenListContainsValue(r.Header, "Connection", "upgrade") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header") + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") } if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header") + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != "GET" { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") } if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") } + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + checkOrigin := u.CheckOrigin if checkOrigin == nil { checkOrigin = checkSameOrigin } if !checkOrigin(r) { - return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed") + return u.returnError(w, r, 
http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") } challengeKey := r.Header.Get("Sec-Websocket-Key") if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank") + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") } subprotocol := u.selectSubprotocol(r, responseHeader) @@ -151,17 +170,12 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } } - var ( - netConn net.Conn - err error - ) - h, ok := w.(http.Hijacker) if !ok { return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") } var brw *bufio.ReadWriter - netConn, brw, err = h.Hijack() + netConn, brw, err := h.Hijack() if err != nil { return u.returnError(w, r, http.StatusInternalServerError, err.Error()) } @@ -171,7 +185,21 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade return nil, errors.New("websocket: client sent data before handshake is complete") } - c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw) + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. 
+ writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) c.subprotocol = subprotocol if compress { @@ -179,17 +207,23 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade c.newDecompressionReader = decompressNoContextTakeover } - p := c.writeBuf[:0] + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) p = append(p, computeAcceptKey(challengeKey)...) p = append(p, "\r\n"...) if c.subprotocol != "" { - p = append(p, "Sec-Websocket-Protocol: "...) + p = append(p, "Sec-WebSocket-Protocol: "...) p = append(p, c.subprotocol...) p = append(p, "\r\n"...) } if compress { - p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) } for k, vs := range responseHeader { if k == "Sec-Websocket-Protocol" { @@ -230,13 +264,14 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade // Upgrade upgrades the HTTP server connection to the WebSocket protocol. // -// This function is deprecated, use websocket.Upgrader instead. +// Deprecated: Use websocket.Upgrader instead. // -// The application is responsible for checking the request origin before -// calling Upgrade. An example implementation of the same origin policy is: +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. 
An example implementation +// of the same origin policy check is: // // if req.Header.Get("Origin") != "http://"+req.Host { -// http.Error(w, "Origin not allowed", 403) +// http.Error(w, "Origin not allowed", http.StatusForbidden) // return // } // @@ -289,3 +324,40 @@ func IsWebSocketUpgrade(r *http.Request) bool { return tokenListContainsValue(r.Header, "Connection", "upgrade") && tokenListContainsValue(r.Header, "Upgrade", "websocket") } + +// bufioReaderSize size returns the size of a bufio.Reader. +func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { + // This code assumes that peek on a reset reader returns + // bufio.Reader.buf[:0]. + // TODO: Use bufio.Reader.Size() after Go 1.10 + br.Reset(originalReader) + if p, err := br.Peek(0); err == nil { + return cap(p) + } + return 0 +} + +// writeHook is an io.Writer that records the last slice passed to it vio +// io.Writer.Write. +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +// bufioWriterBuffer grabs the buffer from a bufio.Writer. +func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { + // This code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. 
+ var wh writeHook + bw.Reset(&wh) + bw.WriteByte(0) + bw.Flush() + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/trace.go b/vendor/github.com/gorilla/websocket/trace.go new file mode 100644 index 00000000000..834f122a00d --- /dev/null +++ b/vendor/github.com/gorilla/websocket/trace.go @@ -0,0 +1,19 @@ +// +build go1.8 + +package websocket + +import ( + "crypto/tls" + "net/http/httptrace" +) + +func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { + if trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(tlsConn, cfg) + if trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/trace_17.go b/vendor/github.com/gorilla/websocket/trace_17.go new file mode 100644 index 00000000000..77d05a0b574 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/trace_17.go @@ -0,0 +1,12 @@ +// +build !go1.8 + +package websocket + +import ( + "crypto/tls" + "net/http/httptrace" +) + +func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { + return doHandshake(tlsConn, cfg) +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go index 9a4908df2ee..7bf2f66c674 100644 --- a/vendor/github.com/gorilla/websocket/util.go +++ b/vendor/github.com/gorilla/websocket/util.go @@ -11,6 +11,7 @@ import ( "io" "net/http" "strings" + "unicode/utf8" ) var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") @@ -30,68 +31,113 @@ func generateChallengeKey() (string, error) { return base64.StdEncoding.EncodeToString(p), nil } -// Octet types from RFC 2616. 
-var octetTypes [256]byte - -const ( - isTokenOctet = 1 << iota - isSpaceOctet -) - -func init() { - // From RFC 2616 - // - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t byte - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpaceOctet - } - if isChar && !isCtl && !isSeparator { - t |= isTokenOctet - } - octetTypes[c] = t - } +// Token octets per RFC 2616. +var isTokenOctet = [256]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, } +// skipSpace returns a slice of the string s with all leading RFC 2616 linear +// whitespace removed. 
func skipSpace(s string) (rest string) { i := 0 for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpaceOctet == 0 { + if b := s[i]; b != ' ' && b != '\t' { break } } return s[i:] } +// nextToken returns the leading RFC 2616 token of s and the string following +// the token. func nextToken(s string) (token, rest string) { i := 0 for ; i < len(s); i++ { - if octetTypes[s[i]]&isTokenOctet == 0 { + if !isTokenOctet[s[i]] { break } } return s[:i], s[i:] } +// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 +// and the string following the token or quoted string. func nextTokenOrQuoted(s string) (value string, rest string) { if !strings.HasPrefix(s, "\"") { return nextToken(s) @@ -111,14 +157,14 @@ func nextTokenOrQuoted(s string) (value string, rest string) { case escape: escape = false p[j] = b - j += 1 + j++ case b == '\\': escape = true case b == '"': return string(p[:j]), s[i+1:] default: p[j] = b - j += 1 + j++ } } return "", "" @@ -127,8 +173,32 @@ func nextTokenOrQuoted(s string) (value string, rest string) { return "", "" } +// equalASCIIFold returns true if s is equal to t with ASCII case folding as +// defined in RFC 4790. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + // tokenListContainsValue returns true if the 1#token header with the given -// name contains token. +// name contains a token equal to value with ASCII case folding. 
func tokenListContainsValue(header http.Header, name string, value string) bool { headers: for _, s := range header[name] { @@ -142,7 +212,7 @@ headers: if s != "" && s[0] != ',' { continue headers } - if strings.EqualFold(t, value) { + if equalASCIIFold(t, value) { return true } if s == "" { @@ -154,9 +224,8 @@ headers: return false } -// parseExtensiosn parses WebSocket extensions from a header. +// parseExtensions parses WebSocket extensions from a header. func parseExtensions(header http.Header) []map[string]string { - // From RFC 6455: // // Sec-WebSocket-Extensions = extension-list diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 00000000000..2e668f6b882 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. 
+func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. 
+func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. 
+ Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. 
+func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. 
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. 
+func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) 
+ + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + 
default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +}