From 3874725d1e9c645a549b270be800a266bb2b3021 Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Mon, 19 Oct 2020 09:16:52 -0400
Subject: [PATCH 01/40] [Elastic Agent] Fix index for Agent monitoring to
 elastic_agent. (#21932)

* Change to elastic_agent.
* Add changelog.
---
 x-pack/elastic-agent/CHANGELOG.next.asciidoc |  1 +
 .../pkg/agent/operation/monitoring.go        | 18 +++++++++---------
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc
index 64d1a3b589b..5284da8db2b 100644
--- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc
+++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc
@@ -18,6 +18,7 @@
 - Prevent reporting ecs version twice {pull}21616[21616]
 - Partial extracted beat result in failure to spawn beat {issue}21718[21718]
 - Use local temp instead of system one {pull}21883[21883]
+- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932]

 ==== New features

diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go
index c4d895eb6ee..74d542d58e9 100644
--- a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go
+++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go
@@ -186,14 +186,14 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i
         "paths": []string{
             filepath.Join(paths.Home(), "logs", "elastic-agent-json.log"),
         },
-        "index": "logs-elastic.agent-default",
+        "index": "logs-elastic_agent-default",
         "processors": []map[string]interface{}{
             {
                 "add_fields": map[string]interface{}{
                     "target": "data_stream",
                     "fields": map[string]interface{}{
                         "type": "logs",
-                        "dataset": "elastic.agent",
+                        "dataset": "elastic_agent",
                         "namespace": "default",
                     },
                 },
             },
@@ -202,7 +202,7 @@
                 "add_fields": map[string]interface{}{
                     "target": "event",
                     "fields": map[string]interface{}{
-                        "dataset": "elastic.agent",
+                        "dataset": "elastic_agent",
                     },
                 },
             },
@@ -220,14 +220,14 @@
             "message_key": "message",
         },
         "paths": paths,
-        "index": fmt.Sprintf("logs-elastic.agent.%s-default", name),
+        "index": fmt.Sprintf("logs-elastic_agent.%s-default", name),
         "processors": []map[string]interface{}{
             {
                 "add_fields": map[string]interface{}{
                     "target": "data_stream",
                     "fields": map[string]interface{}{
                         "type": "logs",
-                        "dataset": fmt.Sprintf("elastic.agent.%s", name),
+                        "dataset": fmt.Sprintf("elastic_agent.%s", name),
                         "namespace": "default",
                     },
                 },
@@ -236,7 +236,7 @@
                 "add_fields": map[string]interface{}{
                     "target": "event",
                     "fields": map[string]interface{}{
-                        "dataset": fmt.Sprintf("elastic.agent.%s", name),
+                        "dataset": fmt.Sprintf("elastic_agent.%s", name),
                     },
                 },
             },
@@ -270,14 +270,14 @@
             "metricsets": []string{"stats", "state"},
             "period": "10s",
             "hosts": endpoints,
-            "index": fmt.Sprintf("metrics-elastic.agent.%s-default", name),
+            "index": fmt.Sprintf("metrics-elastic_agent.%s-default", name),
             "processors": []map[string]interface{}{
                 {
                     "add_fields": map[string]interface{}{
                         "target": "data_stream",
                         "fields": map[string]interface{}{
                             "type": "metrics",
-                            "dataset": fmt.Sprintf("elastic.agent.%s", name),
+                            "dataset": fmt.Sprintf("elastic_agent.%s", name),
                             "namespace": "default",
                         },
                     },
@@ -286,7 +286,7 @@
func (o *Operator) getMonitoringMetricbeatConfig(output interface{}) (map[string "add_fields": map[string]interface{}{ "target": "event", "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", name), }, }, }, From 803ddcada71fcb0ccb398a7f64db38836bb9f472 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 19 Oct 2020 10:13:10 -0400 Subject: [PATCH 02/40] [Elastic Agent] Fix named pipe communication on Windows 7 (#21931) * Fix named pipes on Windows 7. * Add changelog fix notice. --- NOTICE.txt | 6 +++--- go.mod | 1 + go.sum | 4 ++-- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 349fe58b3d1..477f0b53201 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -2183,12 +2183,12 @@ Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/auto -------------------------------------------------------------------------------- -Dependency : github.com/Microsoft/go-winio -Version: v0.4.15-0.20190919025122-fc70bd9a86b5 +Dependency : github.com/bi-zone/go-winio +Version: v0.4.15 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!microsoft/go-winio@v0.4.15-0.20190919025122-fc70bd9a86b5/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/bi-zone/go-winio@v0.4.15/LICENSE: The MIT License (MIT) diff --git a/go.mod b/go.mod index 720690f1f2f..2ef65606319 100644 --- a/go.mod +++ b/go.mod @@ -189,6 +189,7 @@ require ( replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v12.2.0+incompatible + github.com/Microsoft/go-winio => github.com/bi-zone/go-winio v0.4.15 github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20200629123429-0e7b69039eec github.com/cucumber/godog => github.com/cucumber/godog v0.8.1 github.com/docker/docker => github.com/docker/engine v0.0.0-20191113042239-ea84732a7725 diff --git a/go.sum b/go.sum index 5c01c612fe3..97f31d79292 100644 --- a/go.sum +++ b/go.sum @@ -80,8 +80,6 @@ github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -132,6 +130,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bi-zone/go-winio v0.4.15 h1:viLHm+U7bzIkfVHuWgc3Wp/sT5zaLoRG7XdOEy1b12w= +github.com/bi-zone/go-winio v0.4.15/go.mod 
h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/blakerouse/service v1.1.1-0.20200924160513-057808572ffa h1:aXHPZwx8Y5z8r+1WPylnu095usTf6QSshaHs6nVMBc0= github.com/blakerouse/service v1.1.1-0.20200924160513-057808572ffa/go.mod h1:RrJI2xn5vve/r32U5suTbeaSGoMU6GbNPoj36CVYcHc= github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDbMh/lWRhRByN0VFLvv+g+ayx1SI= diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 5284da8db2b..fa0198a6628 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -19,6 +19,7 @@ - Partial extracted beat result in failure to spawn beat {issue}21718[21718] - Use local temp instead of system one {pull}21883[21883] - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] +- Fix issue with named pipes on Windows 7 {pull}21931[21931] ==== New features From b2d1929bff02393360cc0292975b82da448151c3 Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Mon, 19 Oct 2020 18:02:06 +0300 Subject: [PATCH 03/40] Stop storing stateless kubernetes keystores (#21880) --- CHANGELOG.next.asciidoc | 1 + .../k8skeystore/kubernetes_keystore.go | 20 +++++-------------- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 51255305f42..9f5d45e6a8e 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -375,6 +375,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix retrieving resources by ID for the azure module. {pull}21711[21711] {issue}21707[21707] - Use timestamp from CloudWatch API when creating events. {pull}21498[21498] - Report the correct windows events for system/filesystem {pull}21758[21758] +- Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] *Packetbeat* diff --git a/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go b/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go index 616525b432a..e17b4258232 100644 --- a/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go +++ b/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go @@ -30,14 +30,10 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" ) -type KubernetesKeystores map[string]keystore.Keystore - -// KubernetesKeystoresRegistry holds KubernetesKeystores for known namespaces. Once a Keystore for one k8s namespace -// is initialized it will be reused every time it is needed. +// KubernetesKeystoresRegistry implements a Provider for Keystore. type KubernetesKeystoresRegistry struct { - kubernetesKeystores KubernetesKeystores - logger *logp.Logger - client k8s.Interface + logger *logp.Logger + client k8s.Interface } // KubernetesSecretsKeystore allows to retrieve passwords from Kubernetes secrets for a given namespace @@ -56,9 +52,8 @@ func Factoryk8s(keystoreNamespace string, ks8client k8s.Interface, logger *logp. 
// NewKubernetesKeystoresRegistry initializes a KubernetesKeystoresRegistry func NewKubernetesKeystoresRegistry(logger *logp.Logger, client k8s.Interface) keystore.Provider { return &KubernetesKeystoresRegistry{ - kubernetesKeystores: KubernetesKeystores{}, - logger: logger, - client: client, + logger: logger, + client: client, } } @@ -75,12 +70,7 @@ func (kr *KubernetesKeystoresRegistry) GetKeystore(event bus.Event) keystore.Key namespace = ns.(string) } if namespace != "" { - // either retrieve already stored keystore or create a new one for the namespace - if storedKeystore, ok := kr.kubernetesKeystores[namespace]; ok { - return storedKeystore - } k8sKeystore, _ := Factoryk8s(namespace, kr.client, kr.logger) - kr.kubernetesKeystores["namespace"] = k8sKeystore return k8sKeystore } kr.logger.Debugf("Cannot retrieve kubernetes namespace from event: %s", event) From e29c3fae4adbdd066e58ca0df00ed8ca24b74d0d Mon Sep 17 00:00:00 2001 From: Niels Hofmans Date: Mon, 19 Oct 2020 17:19:31 +0200 Subject: [PATCH 04/40] filebeat: add SSL options to checkpoint module (#19560) * feat(firewall): add tls config * feat(firewall): add vars to manifest * chore(checkpoint): add tls to example * chore(checkpoint): run mage fmt update * cleanup(checkpoint): remove obsolete log_level * refactor(checkpoint): move to .ssl * chore(x-pack): revert ide fix * chore(changelog): add f5 asm ref * revert(changelog): remove f5 asm mod * chore(changelog): add checkpoint tls * chore: fix lint warnings * Undo some changes and move docs to checkpoint * Move changelog entry Co-authored-by: Marc Guasch --- CHANGELOG.next.asciidoc | 1 + filebeat/docs/modules/checkpoint.asciidoc | 12 ++++++++++++ .../filebeat/module/checkpoint/_meta/docs.asciidoc | 12 ++++++++++++ .../module/checkpoint/firewall/config/firewall.yml | 8 +++++++- .../filebeat/module/checkpoint/firewall/manifest.yml | 1 + 5 files changed, 33 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 9f5d45e6a8e..fd297059639 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -631,6 +631,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - New juniper.srx dataset for Juniper SRX logs. {pull}20017[20017] - Adding support for Microsoft 365 Defender (Microsoft Threat Protection) {pull}21446[21446] - Adding support for FIPS in s3 input {pull}21446[21446] +- Add SSL option to checkpoint module {pull}19560[19560] *Heartbeat* diff --git a/filebeat/docs/modules/checkpoint.asciidoc b/filebeat/docs/modules/checkpoint.asciidoc index c4e453b452d..841e66fdbab 100644 --- a/filebeat/docs/modules/checkpoint.asciidoc +++ b/filebeat/docs/modules/checkpoint.asciidoc @@ -70,6 +70,18 @@ A list of tags to include in events. Including `forwarded` indicates that the events did not originate on this host and causes `host.name` to not be added to events. Defaults to `[checkpoint-firewall, forwarded]`. +*`var.ssl`*:: + +The SSL/TLS configuration for the filebeat instance. This can be used to enforce mutual TLS. 
+```yaml +ssl: + enabled: true + certificate_authorities: ["my-ca.pem"] + certificate: "filebeat-cert.pem" + key: "filebeat-key.pem" + client_authentication: "required" +``` + [float] ==== Check Point devices diff --git a/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc b/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc index ecd8e0d3e81..385206f03ff 100644 --- a/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc +++ b/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc @@ -65,6 +65,18 @@ A list of tags to include in events. Including `forwarded` indicates that the events did not originate on this host and causes `host.name` to not be added to events. Defaults to `[checkpoint-firewall, forwarded]`. +*`var.ssl`*:: + +The SSL/TLS configuration for the filebeat instance. This can be used to enforce mutual TLS. +```yaml +ssl: + enabled: true + certificate_authorities: ["my-ca.pem"] + certificate: "filebeat-cert.pem" + key: "filebeat-key.pem" + client_authentication: "required" +``` + [float] ==== Check Point devices diff --git a/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml b/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml index 4892400a8b9..9ac586c6b5c 100644 --- a/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml +++ b/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml @@ -1,4 +1,10 @@ -{{ if eq .input "syslog" }} +{{ if .ssl }} + +type: tcp +host: "{{.syslog_host}}:{{.syslog_port}}" +ssl: {{ .ssl | tojson }} + +{{ else if eq .input "syslog" }} type: udp host: "{{.syslog_host}}:{{.syslog_port}}" diff --git a/x-pack/filebeat/module/checkpoint/firewall/manifest.yml b/x-pack/filebeat/module/checkpoint/firewall/manifest.yml index 849c20fafe2..69301541669 100644 --- a/x-pack/filebeat/module/checkpoint/firewall/manifest.yml +++ b/x-pack/filebeat/module/checkpoint/firewall/manifest.yml @@ -9,6 +9,7 @@ var: default: 9001 - name: input default: syslog + - name: ssl ingest_pipeline: - ingest/pipeline.yml From a79dddc8f9dcf5bad68f4a26e67840403e9e2cf7 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Mon, 19 Oct 2020 17:44:42 +0200 Subject: [PATCH 05/40] Fix TestDockerStart flaky test (#21681) Some changes are done to give more resilience to the test: * Wait till image pull is finished, and retry in case of failure. * Checked events are filtered by container id instead of image name, so tests are not affected by other containers that may be running in the system. * Check timeout is for all events now, instead of being reset after an event is received. * Container is removed after test is finished. 
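Of the bullets above, the timeout change is the subtle Go pattern: the `time.After` channel is now created once, before the receive loop, so all events share one overall deadline, whereas the old `case <-time.After(10 * time.Second)` inside the `select` reset the deadline every time an event arrived. A minimal standalone sketch of that pattern, assuming an illustrative string-based event channel rather than the bus type the real test uses:

```go
package main

import (
	"fmt"
	"time"
)

// waitFor drains events until the wanted one arrives or a single overall
// deadline expires. The timeout channel is created once, outside the loop,
// so skipping an unrelated event does not reset the clock.
func waitFor(events <-chan string, want string, limit time.Duration) error {
	timeout := time.After(limit) // one deadline covering all events
	for {
		select {
		case e := <-events:
			if e != want {
				continue // ignore events from other containers
			}
			return nil
		case <-timeout:
			return fmt.Errorf("timed out waiting for %q", want)
		}
	}
}

func main() {
	events := make(chan string, 1)
	events <- "container-123"
	fmt.Println(waitFor(events, "container-123", time.Second)) // <nil>
}
```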
--- .../docker/docker_integration_test.go | 20 ++++----- libbeat/tests/docker/docker.go | 41 ++++++++++++++++--- 2 files changed, 46 insertions(+), 15 deletions(-) diff --git a/libbeat/autodiscover/providers/docker/docker_integration_test.go b/libbeat/autodiscover/providers/docker/docker_integration_test.go index bbb2bc979bc..898f3cd254c 100644 --- a/libbeat/autodiscover/providers/docker/docker_integration_test.go +++ b/libbeat/autodiscover/providers/docker/docker_integration_test.go @@ -36,8 +36,6 @@ import ( // Test docker start emits an autodiscover event func TestDockerStart(t *testing.T) { - t.Skip("#20360 Flaky TestDockerStart skipped") - log := logp.NewLogger("docker") d, err := dk.NewClient() @@ -70,15 +68,17 @@ func TestDockerStart(t *testing.T) { // Start cmd := []string{"echo", "Hi!"} labels := map[string]string{"label": "foo", "label.child": "bar"} - ID, err := d.ContainerStart("busybox", cmd, labels) + ID, err := d.ContainerStart("busybox:latest", cmd, labels) if err != nil { t.Fatal(err) } - checkEvent(t, listener, true) + defer d.ContainerRemove(ID) + + checkEvent(t, listener, ID, true) // Kill d.ContainerKill(ID) - checkEvent(t, listener, false) + checkEvent(t, listener, ID, false) } func getValue(e bus.Event, key string) interface{} { @@ -89,12 +89,13 @@ func getValue(e bus.Event, key string) interface{} { return val } -func checkEvent(t *testing.T, listener bus.Listener, start bool) { +func checkEvent(t *testing.T, listener bus.Listener, id string, start bool) { + timeout := time.After(60 * time.Second) for { select { case e := <-listener.Events(): // Ignore any other container - if getValue(e, "docker.container.image") != "busybox" { + if getValue(e, "container.id") != id { continue } if start { @@ -104,7 +105,7 @@ func checkEvent(t *testing.T, listener bus.Listener, start bool) { assert.Equal(t, getValue(e, "stop"), true) assert.Nil(t, getValue(e, "start")) } - assert.Equal(t, getValue(e, "container.image.name"), "busybox") + assert.Equal(t, getValue(e, "container.image.name"), "busybox:latest") // labels.dedot=true by default assert.Equal(t, common.MapStr{ @@ -122,8 +123,7 @@ func checkEvent(t *testing.T, listener bus.Listener, start bool) { assert.Equal(t, getValue(e, "docker.container.name"), getValue(e, "meta.container.name")) assert.Equal(t, getValue(e, "docker.container.image"), getValue(e, "meta.container.image.name")) return - - case <-time.After(10 * time.Second): + case <-timeout: t.Fatal("Timeout waiting for provider events") return } diff --git a/libbeat/tests/docker/docker.go b/libbeat/tests/docker/docker.go index 888347c5cc7..8bb5efadbfa 100644 --- a/libbeat/tests/docker/docker.go +++ b/libbeat/tests/docker/docker.go @@ -19,6 +19,8 @@ package docker import ( "context" + "io" + "io/ioutil" "github.com/pkg/errors" @@ -42,13 +44,12 @@ func NewClient() (Client, error) { // ContainerStart pulls and starts the given container func (c Client) ContainerStart(image string, cmd []string, labels map[string]string) (string, error) { - ctx := context.Background() - respBody, err := c.cli.ImagePull(ctx, image, types.ImagePullOptions{}) + err := c.imagePull(image) if err != nil { - return "", errors.Wrapf(err, "pullling image %s", image) + return "", err } - defer respBody.Close() + ctx := context.Background() resp, err := c.cli.ContainerCreate(ctx, &container.Config{ Image: image, Cmd: cmd, @@ -65,6 +66,36 @@ func (c Client) ContainerStart(image string, cmd []string, labels map[string]str return resp.ID, nil } +// imagePull pulls an image +func (c Client) 
imagePull(image string) (err error) { + ctx := context.Background() + _, _, err = c.cli.ImageInspectWithRaw(ctx, image) + if err == nil { + // Image already available, do nothing + return nil + } + for retry := 0; retry < 3; retry++ { + err = func() error { + respBody, err := c.cli.ImagePull(ctx, image, types.ImagePullOptions{}) + if err != nil { + return errors.Wrapf(err, "pullling image %s", image) + } + defer respBody.Close() + + // Read all the response, to be sure that the pull has finished before returning. + _, err = io.Copy(ioutil.Discard, respBody) + if err != nil { + return errors.Wrapf(err, "reading response for image %s", image) + } + return nil + }() + if err == nil { + break + } + } + return +} + // ContainerWait waits for a container to finish func (c Client) ContainerWait(ID string) error { ctx := context.Background() @@ -89,7 +120,7 @@ func (c Client) ContainerKill(ID string) error { return c.cli.ContainerKill(ctx, ID, "KILL") } -// ContainerRemove kills and removed the given container +// ContainerRemove kills and removes the given container func (c Client) ContainerRemove(ID string) error { ctx := context.Background() return c.cli.ContainerRemove(ctx, ID, types.ContainerRemoveOptions{ From 6955665227cb13504c52a01d3cbccd0a28c7ed9e Mon Sep 17 00:00:00 2001 From: Alex K <8418476+fearful-symmetry@users.noreply.github.com> Date: Mon, 19 Oct 2020 13:24:56 -0700 Subject: [PATCH 06/40] fix diskio and memory bugs under windows (#21992) --- metricbeat/module/system/diskio/diskio.go | 3 ++- metricbeat/module/system/memory/memory.go | 6 +++--- metricbeat/module/system/process/process.go | 4 +++- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/metricbeat/module/system/diskio/diskio.go b/metricbeat/module/system/diskio/diskio.go index 1359180cff6..4a7e2e2b5fe 100644 --- a/metricbeat/module/system/diskio/diskio.go +++ b/metricbeat/module/system/diskio/diskio.go @@ -21,6 +21,7 @@ package diskio import ( "fmt" + "runtime" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/metric/system/diskio" @@ -114,7 +115,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { diskWriteBytes += counters.WriteBytes //Add linux-only data if agent is off as not to make breaking changes. - if !m.IsAgent { + if !m.IsAgent && runtime.GOOS == "linux" { result, err := m.statistics.CalcIOStatistics(counters) if err != nil { return errors.Wrap(err, "error calculating iostat") diff --git a/metricbeat/module/system/memory/memory.go b/metricbeat/module/system/memory/memory.go index 27e76b85489..26c6bea1867 100644 --- a/metricbeat/module/system/memory/memory.go +++ b/metricbeat/module/system/memory/memory.go @@ -42,7 +42,7 @@ func init() { // MetricSet for fetching system memory metrics. type MetricSet struct { mb.BaseMetricSet - IsFleet bool + IsAgent bool } // New is a mb.MetricSetFactory that returns a memory.MetricSet. @@ -53,7 +53,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, fmt.Errorf("unexpected module type") } - return &MetricSet{BaseMetricSet: base, IsFleet: systemModule.IsAgent}, nil + return &MetricSet{BaseMetricSet: base, IsAgent: systemModule.IsAgent}, nil } // Fetch fetches memory metrics from the OS. 
@@ -117,7 +117,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { } // for backwards compatibility, only report if we're not in fleet mode - if !m.IsFleet { + if !m.IsAgent { err := linux.FetchLinuxMemStats(memory) if err != nil { return errors.Wrap(err, "error getting page stats") diff --git a/metricbeat/module/system/process/process.go b/metricbeat/module/system/process/process.go index 804c62d06d6..c99ffaa1123 100644 --- a/metricbeat/module/system/process/process.go +++ b/metricbeat/module/system/process/process.go @@ -156,10 +156,12 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { // There's some more Windows memory quirks we need to deal with. // "rss" is a linux concept, but "wss" is a direct match on Windows. // "share" is also unavailable on Windows. + if runtime.GOOS == "windows" { + proc.Delete("memory.share") + } if m.IsAgent { if runtime.GOOS == "windows" { - proc.Delete("memory.share") if setSize := getAndRemove(proc, "memory.rss"); setSize != nil { proc.Put("memory.wss", setSize) } From fa50a44556a2c7d7f78855f98e12bf13848a0f9a Mon Sep 17 00:00:00 2001 From: Mariana Dima Date: Tue, 20 Oct 2020 09:28:30 +0200 Subject: [PATCH 07/40] Azure storage metricset values not inside the metricset name (#21845) * mofidy doc * fix * changelog --- CHANGELOG.next.asciidoc | 1 + x-pack/metricbeat/module/azure/storage/storage.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index fd297059639..6ead76346b3 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -375,6 +375,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix retrieving resources by ID for the azure module. {pull}21711[21711] {issue}21707[21707] - Use timestamp from CloudWatch API when creating events. {pull}21498[21498] - Report the correct windows events for system/filesystem {pull}21758[21758] +- Fix azure storage event format. 
{pull}21845[21845] - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] *Packetbeat* diff --git a/x-pack/metricbeat/module/azure/storage/storage.go b/x-pack/metricbeat/module/azure/storage/storage.go index 9f54871b319..4178b911d11 100644 --- a/x-pack/metricbeat/module/azure/storage/storage.go +++ b/x-pack/metricbeat/module/azure/storage/storage.go @@ -41,6 +41,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { if err != nil { return nil, err } + // set default resource type to indicate this is not the generic monitor metricset + ms.Client.Config.DefaultResourceType = defaultStorageAccountNamespace // if no options are entered we will retrieve all the vm's from the entire subscription if len(ms.Client.Config.Resources) == 0 { ms.Client.Config.Resources = []azure.ResourceConfig{ From e7fd212d8c4974927a295002b399a09401a629f7 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Tue, 20 Oct 2020 11:28:32 +0200 Subject: [PATCH 08/40] [Ingest Manager] Always try snapshot repo for agent upgrade (#21951) [Ingest Manager] Always try snapshot repo for agent upgrade (#21951) --- x-pack/elastic-agent/pkg/agent/application/stream.go | 4 ++-- .../pkg/agent/application/upgrade/step_download.go | 4 ++-- x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go | 3 --- .../pkg/artifact/download/localremote/downloader.go | 4 ++-- .../pkg/artifact/download/localremote/verifier.go | 4 ++-- 5 files changed, 8 insertions(+), 11 deletions(-) diff --git a/x-pack/elastic-agent/pkg/agent/application/stream.go b/x-pack/elastic-agent/pkg/agent/application/stream.go index 41999fcb832..784038e77ab 100644 --- a/x-pack/elastic-agent/pkg/agent/application/stream.go +++ b/x-pack/elastic-agent/pkg/agent/application/stream.go @@ -56,9 +56,9 @@ func streamFactory(ctx context.Context, cfg *configuration.SettingsConfig, srv * } func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) (*operation.Operator, error) { - fetcher := downloader.NewDownloader(log, config.DownloadConfig) + fetcher := downloader.NewDownloader(log, config.DownloadConfig, false) allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp) + verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp, false) if err != nil { return nil, errors.New(err, "initiating verifier") } diff --git a/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go index 3aea96da0ab..0294308ff3a 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go @@ -27,12 +27,12 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri } allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(u.log, &settings, allowEmptyPgp, pgp) + verifier, err := downloader.NewVerifier(u.log, &settings, allowEmptyPgp, pgp, true) if err != nil { return "", errors.New(err, "initiating verifier") } - fetcher := downloader.NewDownloader(u.log, &settings) + fetcher := downloader.NewDownloader(u.log, &settings, true) path, err := fetcher.Download(ctx, agentName, agentArtifactName, version) if err != nil { return "", errors.New(err, "failed upgrade of agent binary") diff --git 
a/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go index 1a21bc154a1..d7e69fc3972 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go @@ -183,9 +183,6 @@ func (u *Upgrader) Ack(ctx context.Context) error { } func (u *Upgrader) sourceURI(version, retrievedURI string) (string, error) { - if strings.HasSuffix(version, "-SNAPSHOT") && retrievedURI == "" { - return "", errors.New("snapshot upgrade requires source uri", errors.TypeConfig) - } if retrievedURI != "" { return retrievedURI, nil } diff --git a/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go b/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go index 6448af25aca..ba82195ffbd 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go +++ b/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go @@ -17,12 +17,12 @@ import ( // NewDownloader creates a downloader which first checks local directory // and then fallbacks to remote if configured. -func NewDownloader(log *logger.Logger, config *artifact.Config) download.Downloader { +func NewDownloader(log *logger.Logger, config *artifact.Config, forceSnapshot bool) download.Downloader { downloaders := make([]download.Downloader, 0, 3) downloaders = append(downloaders, fs.NewDownloader(config)) // try snapshot repo before official - if release.Snapshot() { + if release.Snapshot() || forceSnapshot { snapDownloader, err := snapshot.NewDownloader(config) if err != nil { log.Error(err) diff --git a/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go b/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go index 4f33cbbdb8e..30517d12d3d 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go +++ b/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go @@ -17,7 +17,7 @@ import ( // NewVerifier creates a downloader which first checks local directory // and then fallbacks to remote if configured. 
-func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte) (download.Verifier, error) { +func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte, forceSnapshot bool) (download.Verifier, error) { verifiers := make([]download.Verifier, 0, 3) fsVer, err := fs.NewVerifier(config, allowEmptyPgp, pgp) @@ -27,7 +27,7 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool verifiers = append(verifiers, fsVer) // try snapshot repo before official - if release.Snapshot() { + if release.Snapshot() || forceSnapshot { snapshotVerifier, err := snapshot.NewVerifier(config, allowEmptyPgp, pgp) if err != nil { log.Error(err) From 0d5ef7b3ccdbd4c83d2a2df018b654164383ecee Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Tue, 20 Oct 2020 12:49:18 +0300 Subject: [PATCH 09/40] [Kubernetes] Remove redundant dockersock volume mount (#22009) --- CHANGELOG.next.asciidoc | 1 + deploy/kubernetes/metricbeat-kubernetes.yaml | 5 ----- deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml | 5 ----- 3 files changed, 1 insertion(+), 10 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6ead76346b3..fa8d1fc2791 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -377,6 +377,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Report the correct windows events for system/filesystem {pull}21758[21758] - Fix azure storage event format. {pull}21845[21845] - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] +- [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] *Packetbeat* diff --git a/deploy/kubernetes/metricbeat-kubernetes.yaml b/deploy/kubernetes/metricbeat-kubernetes.yaml index 32d1010f4d0..db1eb25d7a5 100644 --- a/deploy/kubernetes/metricbeat-kubernetes.yaml +++ b/deploy/kubernetes/metricbeat-kubernetes.yaml @@ -189,8 +189,6 @@ spec: - name: modules mountPath: /usr/share/metricbeat/modules.d readOnly: true - - name: dockersock - mountPath: /var/run/docker.sock - name: proc mountPath: /hostfs/proc readOnly: true @@ -204,9 +202,6 @@ spec: - name: cgroup hostPath: path: /sys/fs/cgroup - - name: dockersock - hostPath: - path: /var/run/docker.sock - name: config configMap: defaultMode: 0640 diff --git a/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml b/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml index 0197fe136b6..34bcf536068 100644 --- a/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml +++ b/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml @@ -64,8 +64,6 @@ spec: - name: modules mountPath: /usr/share/metricbeat/modules.d readOnly: true - - name: dockersock - mountPath: /var/run/docker.sock - name: proc mountPath: /hostfs/proc readOnly: true @@ -79,9 +77,6 @@ spec: - name: cgroup hostPath: path: /sys/fs/cgroup - - name: dockersock - hostPath: - path: /var/run/docker.sock - name: config configMap: defaultMode: 0640 From 0bb45f25cc4de6849ec419f2cecfca2aaa193cf7 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Tue, 20 Oct 2020 13:57:25 +0100 Subject: [PATCH 10/40] [beats-tester][packaging] store packages in another location (#21903) --- .ci/beats-tester.groovy | 3 +++ .ci/packaging.groovy | 11 ++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.ci/beats-tester.groovy b/.ci/beats-tester.groovy index eb1357700b6..91781a98d31 100644 --- a/.ci/beats-tester.groovy +++ b/.ci/beats-tester.groovy @@ -54,6 +54,7 @@ pipeline { options { 
skipDefaultCheckout() } when { branch 'master' } steps { + // TODO: to use the git commit that triggered the upstream build runBeatsTesterJob(version: "${env.VERSION}-SNAPSHOT") } } @@ -61,6 +62,7 @@ pipeline { options { skipDefaultCheckout() } when { branch '*.x' } steps { + // TODO: to use the git commit that triggered the upstream build runBeatsTesterJob(version: "${env.VERSION}-SNAPSHOT") } } @@ -84,6 +86,7 @@ pipeline { } } steps { + // TODO: to use the git commit that triggered the upstream build runBeatsTesterJob(version: "${env.VERSION}-SNAPSHOT") } } diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index 4145ee6bdd1..8936de2fb3e 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -327,7 +327,16 @@ def publishPackages(baseDir){ bucketUri = "gs://${JOB_GCS_BUCKET}/pull-requests/pr-${env.CHANGE_ID}" } def beatsFolderName = getBeatsName(baseDir) - googleStorageUpload(bucket: "${bucketUri}/${beatsFolderName}", + uploadPackages("${bucketUri}/${beatsFolderName}", baseDir) + + // Copy those files to another location with the sha commit to test them + // aftewords. + bucketUri = "gs://${JOB_GCS_BUCKET}/commits/${env.GIT_BASE_COMMIT}" + uploadPackages("${bucketUri}/${beatsFolderName}", baseDir) +} + +def uploadPackages(bucketUri, baseDir){ + googleStorageUpload(bucket: bucketUri, credentialsId: "${JOB_GCS_CREDENTIALS}", pathPrefix: "${baseDir}/build/distributions/", pattern: "${baseDir}/build/distributions/**/*", From 38add00bffb8565c1b5e0cfe17776934af3ef525 Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Tue, 20 Oct 2020 16:49:11 +0300 Subject: [PATCH 11/40] Fix Istio docs (#22019) Signed-off-by: chrismark --- metricbeat/docs/modules/istio.asciidoc | 4 ++-- x-pack/metricbeat/module/istio/_meta/docs.asciidoc | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/metricbeat/docs/modules/istio.asciidoc b/metricbeat/docs/modules/istio.asciidoc index c80e2d84c09..fee401e1983 100644 --- a/metricbeat/docs/modules/istio.asciidoc +++ b/metricbeat/docs/modules/istio.asciidoc @@ -10,8 +10,8 @@ beta[] This is the Istio module. This module is compatible with versions before `1.5` of Istio where microservices architecture is used. If using -versions priot to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used. -where the Istio module collects metrics from the +versions prior to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used where the Istio +module collects metrics from the Istio https://istio.io/v1.4/docs/tasks/observability/metrics/querying-metrics/#about-the-prometheus-add-on[prometheus exporters endpoints]. For versions after `1.5`, `istiod` and `proxy` metricsets can be used. diff --git a/x-pack/metricbeat/module/istio/_meta/docs.asciidoc b/x-pack/metricbeat/module/istio/_meta/docs.asciidoc index f3b1825a9b1..39eb93b4095 100644 --- a/x-pack/metricbeat/module/istio/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/istio/_meta/docs.asciidoc @@ -1,7 +1,7 @@ This is the Istio module. This module is compatible with versions before `1.5` of Istio where microservices architecture is used. If using -versions priot to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used. 
-where the Istio module collects metrics from the +versions prior to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used where the Istio +module collects metrics from the Istio https://istio.io/v1.4/docs/tasks/observability/metrics/querying-metrics/#about-the-prometheus-add-on[prometheus exporters endpoints]. For versions after `1.5`, `istiod` and `proxy` metricsets can be used. From 37dc557b2c04ab4c87eceea5271b6733e23d356e Mon Sep 17 00:00:00 2001 From: Lee Hinman <57081003+leehinman@users.noreply.github.com> Date: Tue, 20 Oct 2020 09:16:44 -0500 Subject: [PATCH 12/40] dynamically find librpm (#21936) - use elf header of rpm binary to find version of librpm - use librpm.so as fallback, provided by rpm-devel Closes #19287 --- .../module/system/package/rpm_linux.go | 63 ++++++++++++++----- 1 file changed, 47 insertions(+), 16 deletions(-) diff --git a/x-pack/auditbeat/module/system/package/rpm_linux.go b/x-pack/auditbeat/module/system/package/rpm_linux.go index fa6fc66f4cd..6e5df7e0c6e 100644 --- a/x-pack/auditbeat/module/system/package/rpm_linux.go +++ b/x-pack/auditbeat/module/system/package/rpm_linux.go @@ -10,9 +10,12 @@ import ( "errors" "fmt" "runtime" + "strings" "time" "unsafe" + "debug/elf" + "github.com/coreos/pkg/dlopen" ) @@ -204,29 +207,57 @@ func (lib *librpm) close() error { return nil } -func openLibrpm() (*librpm, error) { - var librpmNames = []string{ - "librpm.so", // with rpm-devel installed - "librpm.so.9", // Fedora 31/32 - "librpm.so.8", // Fedora 29/30 - "librpm.so.3", // CentOS 7 - "librpm.so.1", // CentOS 6 - - // Following for completeness, but not explicitly tested - "librpm.so.10", - "librpm.so.7", - "librpm.so.6", - "librpm.so.5", - "librpm.so.4", - "librpm.so.2", +// getLibrpmNames determines the versions of librpm.so that are +// installed on a system. rpm-devel rpm installs the librpm.so +// symbolic link to the correct version of librpm, but that isn't a +// required package. rpm will install librpm.so.X, where X is the +// version number. getLibrpmNames looks at the elf header for the rpm +// binary to determine what version of librpm.so it is linked against. 
+func getLibrpmNames() []string { + var rpmPaths = []string{ + "/usr/bin/rpm", + "/bin/rpm", + } + var libNames = []string{ + "librpm.so", } + var rpmElf *elf.File + var err error + + for _, path := range rpmPaths { + rpmElf, err = elf.Open(path) + if err == nil { + break + } + } + if err != nil { + return libNames + } + + impLibs, err := rpmElf.ImportedLibraries() + if err != nil { + return libNames + } + + for _, lib := range impLibs { + if strings.Contains(lib, "librpm.so") { + libNames = append(libNames, lib) + } + } + + return libNames +} + +func openLibrpm() (*librpm, error) { var librpm librpm var err error + librpmNames := getLibrpmNames() + librpm.handle, err = dlopen.GetHandle(librpmNames) if err != nil { - return nil, err + return nil, fmt.Errorf("Couldn't open %v", librpmNames) } librpm.rpmtsCreate, err = librpm.handle.GetSymbolPointer("rpmtsCreate") From 283641ec6ad66e09c2bf04be85b062764c6ce711 Mon Sep 17 00:00:00 2001 From: EamonnTP Date: Tue, 20 Oct 2020 16:08:23 +0100 Subject: [PATCH 13/40] Update links (#22012) --- libbeat/docs/getting-started.asciidoc | 4 ++-- libbeat/docs/howto/load-dashboards.asciidoc | 4 ++-- libbeat/docs/overview.asciidoc | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/libbeat/docs/getting-started.asciidoc b/libbeat/docs/getting-started.asciidoc index b1a85fddb46..5291f755e5b 100644 --- a/libbeat/docs/getting-started.asciidoc +++ b/libbeat/docs/getting-started.asciidoc @@ -13,5 +13,5 @@ Each Beat is a separately installable product. To learn how to get started, see: * {winlogbeat-ref}/winlogbeat-installation-configuration.html[Winlogbeat] If you're planning to use the {metrics-app} or the {logs-app} in {kib}, -also see the {metrics-guide}[Metrics Monitoring Guide] -and the {logs-guide}[Logs Monitoring Guide]. +see {observability-guide}/analyze-metrics.html[Analyze metrics] +and {observability-guide}/monitor-logs.html[Monitor logs]. \ No newline at end of file diff --git a/libbeat/docs/howto/load-dashboards.asciidoc b/libbeat/docs/howto/load-dashboards.asciidoc index 781789d3ae4..c03b512d636 100644 --- a/libbeat/docs/howto/load-dashboards.asciidoc +++ b/libbeat/docs/howto/load-dashboards.asciidoc @@ -15,8 +15,8 @@ ifdef::has_solutions[] TIP: For deeper observability into your infrastructure, you can use the {metrics-app} and the {logs-app} in {kib}. -For more details, see the {metrics-guide}[Metrics Monitoring Guide] -and the {logs-guide}[Logs Monitoring Guide]. +For more details, see {observability-guide}/analyze-metrics.html[Analyze metrics] +and {observability-guide}/monitor-logs.html[Monitor logs]. endif::has_solutions[] {beatname_uc} comes packaged with example Kibana dashboards, visualizations, diff --git a/libbeat/docs/overview.asciidoc b/libbeat/docs/overview.asciidoc index 11dc10f2b8f..bdc46aaaf28 100644 --- a/libbeat/docs/overview.asciidoc +++ b/libbeat/docs/overview.asciidoc @@ -28,8 +28,8 @@ To get started, see <>. Want to get up and running quickly with infrastructure metrics monitoring and centralized log analytics? Try out the {metrics-app} and the {logs-app} in {kib}. -For more details, see the {metrics-guide}[Metrics Monitoring Guide] -and the {logs-guide}[Logs Monitoring Guide]. +For more details, see {observability-guide}/analyze-metrics.html[Analyze metrics] +and {observability-guide}/monitor-logs.html[Monitor logs]. [float] === Need to capture other kinds of data? 
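The `getLibrpmNames` helper added in patch 12 above works because Go's standard `debug/elf` package can read a binary's dynamic-link table directly. A stripped-down sketch of that technique in isolation (assuming `/usr/bin/rpm` exists, as the patch itself does for its first probe):

```go
package main

import (
	"debug/elf"
	"fmt"
	"log"
	"strings"
)

func main() {
	// Open the ELF headers of the rpm binary.
	f, err := elf.Open("/usr/bin/rpm")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// ImportedLibraries returns the DT_NEEDED entries, i.e. the shared
	// libraries the binary was linked against, such as librpm.so.8.
	libs, err := f.ImportedLibraries()
	if err != nil {
		log.Fatal(err)
	}
	for _, lib := range libs {
		if strings.Contains(lib, "librpm.so") {
			fmt.Println("linked against:", lib)
		}
	}
}
```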
From e0d06541847dd27b04d2ac328dadf73ac7f883d3 Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Tue, 20 Oct 2020 11:52:56 -0400 Subject: [PATCH 14/40] Document auditbeat system process module config (#21766) The documentation for the system/process dataset was missing information on the configuration options. Closes #16869 --- x-pack/auditbeat/docs/modules/system.asciidoc | 2 +- .../module/system/_meta/docs.asciidoc | 2 +- .../module/system/process/_meta/docs.asciidoc | 22 ++++++++++++++++++- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/x-pack/auditbeat/docs/modules/system.asciidoc b/x-pack/auditbeat/docs/modules/system.asciidoc index 15eafc34116..e850c065197 100644 --- a/x-pack/auditbeat/docs/modules/system.asciidoc +++ b/x-pack/auditbeat/docs/modules/system.asciidoc @@ -97,7 +97,7 @@ This module also supports the <> described later. -*`state.period`*:: The frequency at which the datasets send full state information. +*`state.period`*:: The interval at which the datasets send full state information. This option can be overridden per dataset using `{dataset}.state.period`. *`user.detect_password_changes`*:: If the `user` dataset is configured and diff --git a/x-pack/auditbeat/module/system/_meta/docs.asciidoc b/x-pack/auditbeat/module/system/_meta/docs.asciidoc index 083435d94ae..a2a36987c51 100644 --- a/x-pack/auditbeat/module/system/_meta/docs.asciidoc +++ b/x-pack/auditbeat/module/system/_meta/docs.asciidoc @@ -90,7 +90,7 @@ This module also supports the <> described later. -*`state.period`*:: The frequency at which the datasets send full state information. +*`state.period`*:: The interval at which the datasets send full state information. This option can be overridden per dataset using `{dataset}.state.period`. *`user.detect_password_changes`*:: If the `user` dataset is configured and diff --git a/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc b/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc index e1d930e1fbf..e84f7246933 100644 --- a/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc +++ b/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc @@ -2,10 +2,30 @@ beta[] -This is the `process` dataset of the system module. +This is the `process` dataset of the system module. It generates an event when +a process starts and stops. It is implemented for Linux, macOS (Darwin), and Windows. +[float] +=== Configuration options + +*`process.state.period`*:: The interval at which the dataset sends full state +information. If set this will take precedence over `state.period`. The default +value is `12h`. + +*`process.hash.max_file_size`*:: The maximum size of a file in bytes for which +{beatname_uc} will compute hashes. Files larger than this size will not be +hashed. The default value is 100 MiB. For convenience units can be specified as +a suffix to the value. The supported units are `b` (default), `kib`, `kb`, +`mib`, `mb`, `gib`, `gb`, `tib`, `tb`, `pib`, `pb`, `eib`, and `eb`. + +*`process.hash.hash_types`*:: A list of hash types to compute when the file +changes. The supported hash types are `blake2b_256`, `blake2b_384`, +`blake2b_512`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, +`sha512_224`, `sha512_256`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, and +`xxh64`. The default value is `sha1`. 
+ [float] ==== Example dashboard From 610e998c121e9453363a0f429c5f8d197eb1350d Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Tue, 20 Oct 2020 12:29:00 -0400 Subject: [PATCH 15/40] [Elastic Agent] Fix missing elastic_agent event data (#21994) * Fix fields. * Remove from monitoring decorator. * Add changelog. * Fix tests. * Fix tests. * Fix import. --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + .../pkg/agent/application/local_mode.go | 2 +- .../pkg/agent/application/managed_mode.go | 2 +- .../agent/application/monitoring_decorator.go | 1 - .../pkg/agent/application/stream.go | 8 +++-- .../pkg/agent/operation/common_test.go | 4 ++- .../pkg/agent/operation/monitoring.go | 30 +++++++++++++++++++ .../pkg/agent/operation/monitoring_test.go | 4 ++- .../pkg/agent/operation/operator.go | 4 +++ .../testdata/enabled_output_true-filebeat.yml | 8 ++--- .../testdata/enabled_true-filebeat.yml | 8 ++--- .../testdata/single_config-filebeat.yml | 16 +++++----- .../testdata/single_config-metricbeat.yml | 24 +++++++-------- .../pkg/agent/transpiler/rules.go | 8 ++--- .../pkg/agent/transpiler/rules_test.go | 16 +++++----- 15 files changed, 88 insertions(+), 48 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index fa0198a6628..3882ba19712 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -20,6 +20,7 @@ - Use local temp instead of system one {pull}21883[21883] - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] - Fix issue with named pipes on Windows 7 {pull}21931[21931] +- Fix missing elastic_agent event data {pull}21994[21994] ==== New features diff --git a/x-pack/elastic-agent/pkg/agent/application/local_mode.go b/x-pack/elastic-agent/pkg/agent/application/local_mode.go index b58e260cab6..f0c4153f474 100644 --- a/x-pack/elastic-agent/pkg/agent/application/local_mode.go +++ b/x-pack/elastic-agent/pkg/agent/application/local_mode.go @@ -100,7 +100,7 @@ func newLocal( return nil, errors.New(err, "failed to initialize monitoring") } - router, err := newRouter(log, streamFactory(localApplication.bgContext, cfg.Settings, localApplication.srv, reporter, monitor)) + router, err := newRouter(log, streamFactory(localApplication.bgContext, agentInfo, cfg.Settings, localApplication.srv, reporter, monitor)) if err != nil { return nil, errors.New(err, "fail to initialize pipeline router") } diff --git a/x-pack/elastic-agent/pkg/agent/application/managed_mode.go b/x-pack/elastic-agent/pkg/agent/application/managed_mode.go index e38685741c3..fa31215f75d 100644 --- a/x-pack/elastic-agent/pkg/agent/application/managed_mode.go +++ b/x-pack/elastic-agent/pkg/agent/application/managed_mode.go @@ -154,7 +154,7 @@ func newManaged( return nil, errors.New(err, "failed to initialize monitoring") } - router, err := newRouter(log, streamFactory(managedApplication.bgContext, cfg.Settings, managedApplication.srv, combinedReporter, monitor)) + router, err := newRouter(log, streamFactory(managedApplication.bgContext, agentInfo, cfg.Settings, managedApplication.srv, combinedReporter, monitor)) if err != nil { return nil, errors.New(err, "fail to initialize pipeline router") } diff --git a/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go b/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go index 3fc49ef17d3..920b1a4b5bf 100644 --- a/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go +++ 
b/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go @@ -94,7 +94,6 @@ func getMonitoringRule(outputName string) *transpiler.RuleList { return transpiler.NewRuleList( transpiler.Copy(monitoringOutputSelector, outputKey), transpiler.Rename(fmt.Sprintf("%s.%s", outputsKey, outputName), elasticsearchKey), - transpiler.InjectAgentInfo(), transpiler.Filter(monitoringKey, programsKey, outputKey), ) } diff --git a/x-pack/elastic-agent/pkg/agent/application/stream.go b/x-pack/elastic-agent/pkg/agent/application/stream.go index 784038e77ab..2d372ef4387 100644 --- a/x-pack/elastic-agent/pkg/agent/application/stream.go +++ b/x-pack/elastic-agent/pkg/agent/application/stream.go @@ -7,6 +7,7 @@ package application import ( "context" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" @@ -40,10 +41,10 @@ func (b *operatorStream) Shutdown() { b.configHandler.Shutdown() } -func streamFactory(ctx context.Context, cfg *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) func(*logger.Logger, routingKey) (stream, error) { +func streamFactory(ctx context.Context, agentInfo *info.AgentInfo, cfg *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) func(*logger.Logger, routingKey) (stream, error) { return func(log *logger.Logger, id routingKey) (stream, error) { // new operator per stream to isolate processes without using tags - operator, err := newOperator(ctx, log, id, cfg, srv, r, m) + operator, err := newOperator(ctx, log, agentInfo, id, cfg, srv, r, m) if err != nil { return nil, err } @@ -55,7 +56,7 @@ func streamFactory(ctx context.Context, cfg *configuration.SettingsConfig, srv * } } -func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) (*operation.Operator, error) { +func newOperator(ctx context.Context, log *logger.Logger, agentInfo *info.AgentInfo, id routingKey, config *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) (*operation.Operator, error) { fetcher := downloader.NewDownloader(log, config.DownloadConfig, false) allowEmptyPgp, pgp := release.PGP() verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp, false) @@ -81,6 +82,7 @@ func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config return operation.NewOperator( ctx, log, + agentInfo, id, config, fetcher, diff --git a/x-pack/elastic-agent/pkg/agent/operation/common_test.go b/x-pack/elastic-agent/pkg/agent/operation/common_test.go index e9d40bece87..ea16cfe77b8 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/common_test.go +++ b/x-pack/elastic-agent/pkg/agent/operation/common_test.go @@ -13,6 +13,7 @@ import ( "time" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/program" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/stateresolver" @@ -48,6 +49,7 @@ func getTestOperator(t *testing.T, downloadPath string, installPath string, p *a } l := getLogger() + agentInfo, _ := 
info.NewAgentInfo() fetcher := &DummyDownloader{} verifier := &DummyVerifier{} @@ -67,7 +69,7 @@ func getTestOperator(t *testing.T, downloadPath string, installPath string, p *a t.Fatal(err) } - operator, err := NewOperator(context.Background(), l, "p1", operatorCfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, noop.NewMonitor()) + operator, err := NewOperator(context.Background(), l, agentInfo, "p1", operatorCfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, noop.NewMonitor()) if err != nil { t.Fatal(err) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go index 74d542d58e9..1959cd52818 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go +++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go @@ -206,6 +206,16 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i }, }, }, + { + "add_fields": map[string]interface{}{ + "target": "elastic_agent", + "fields": map[string]interface{}{ + "id": o.agentInfo.AgentID(), + "version": o.agentInfo.Version(), + "snapshot": o.agentInfo.Snapshot(), + }, + }, + }, }, }, } @@ -240,6 +250,16 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i }, }, }, + { + "add_fields": map[string]interface{}{ + "target": "elastic_agent", + "fields": map[string]interface{}{ + "id": o.agentInfo.AgentID(), + "version": o.agentInfo.Version(), + "snapshot": o.agentInfo.Snapshot(), + }, + }, + }, }, }) } @@ -290,6 +310,16 @@ func (o *Operator) getMonitoringMetricbeatConfig(output interface{}) (map[string }, }, }, + { + "add_fields": map[string]interface{}{ + "target": "elastic_agent", + "fields": map[string]interface{}{ + "id": o.agentInfo.AgentID(), + "version": o.agentInfo.Version(), + "snapshot": o.agentInfo.Snapshot(), + }, + }, + }, }, }) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go index eef904096f7..3ca6a5f6b14 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go +++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go @@ -11,6 +11,7 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/stateresolver" @@ -112,6 +113,7 @@ func getMonitorableTestOperator(t *testing.T, installPath string, m monitoring.M } l := getLogger() + agentInfo, _ := info.NewAgentInfo() fetcher := &DummyDownloader{} verifier := &DummyVerifier{} @@ -128,7 +130,7 @@ func getMonitorableTestOperator(t *testing.T, installPath string, m monitoring.M } ctx := context.Background() - operator, err := NewOperator(ctx, l, "p1", cfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, m) + operator, err := NewOperator(ctx, l, agentInfo, "p1", cfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, m) if err != nil { t.Fatal(err) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/operator.go b/x-pack/elastic-agent/pkg/agent/operation/operator.go index b4938278821..1a39e73500e 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/operator.go +++ b/x-pack/elastic-agent/pkg/agent/operation/operator.go @@ -12,6 +12,7 @@ import ( "sync" "time" + 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" @@ -43,6 +44,7 @@ type Operator struct { bgContext context.Context pipelineID string logger *logger.Logger + agentInfo *info.AgentInfo config *configuration.SettingsConfig handlers map[string]handleFunc stateResolver *stateresolver.StateResolver @@ -66,6 +68,7 @@ type Operator struct { func NewOperator( ctx context.Context, logger *logger.Logger, + agentInfo *info.AgentInfo, pipelineID string, config *configuration.SettingsConfig, fetcher download.Downloader, @@ -85,6 +88,7 @@ func NewOperator( config: config, pipelineID: pipelineID, logger: logger, + agentInfo: agentInfo, downloader: fetcher, verifier: verifier, installer: installer, diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml index 38b251d95dc..82a47adc999 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml @@ -17,11 +17,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: enabled: true diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml index 6e768db6aa4..1406a2dff65 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml @@ -18,11 +18,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: hosts: diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml index 01ee955e4ec..524d6451f28 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml @@ -19,11 +19,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false - type: log paths: - /var/log/hello3.log @@ -43,11 +43,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: hosts: diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml index d09e80accf1..2889e7605eb 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml @@ -16,11 +16,11 @@ 
metricbeat: fields: dataset: docker.status - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false - module: docker metricsets: [info] index: metrics-generic-default @@ -37,11 +37,11 @@ metricbeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false - module: apache metricsets: [info] index: metrics-generic-testing @@ -61,11 +61,11 @@ metricbeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: hosts: [127.0.0.1:9200, 127.0.0.1:9300] diff --git a/x-pack/elastic-agent/pkg/agent/transpiler/rules.go b/x-pack/elastic-agent/pkg/agent/transpiler/rules.go index 29ff1786d1e..42acd53d21a 100644 --- a/x-pack/elastic-agent/pkg/agent/transpiler/rules.go +++ b/x-pack/elastic-agent/pkg/agent/transpiler/rules.go @@ -715,11 +715,11 @@ func (r *InjectAgentInfoRule) Apply(agentInfo AgentInfo, ast *AST) error { // elastic.agent processorMap := &Dict{value: make([]Node, 0)} - processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "elastic"}}) + processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "elastic_agent"}}) processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ - &Key{name: "agent.id", value: &StrVal{value: agentInfo.AgentID()}}, - &Key{name: "agent.version", value: &StrVal{value: agentInfo.Version()}}, - &Key{name: "agent.snapshot", value: &BoolVal{value: agentInfo.Snapshot()}}, + &Key{name: "id", value: &StrVal{value: agentInfo.AgentID()}}, + &Key{name: "version", value: &StrVal{value: agentInfo.Version()}}, + &Key{name: "snapshot", value: &BoolVal{value: agentInfo.Snapshot()}}, }}}) addFieldsMap := &Dict{value: []Node{&Key{"add_fields", processorMap}}} processorsList.value = mergeStrategy("").InjectItem(processorsList.value, addFieldsMap) diff --git a/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go b/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go index d92ba0de985..0fb59107844 100644 --- a/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go +++ b/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go @@ -184,11 +184,11 @@ inputs: type: file processors: - add_fields: - target: elastic + target: elastic_agent fields: - agent.id: agent-id - agent.snapshot: false - agent.version: 8.0.0 + id: agent-id + snapshot: false + version: 8.0.0 - name: With processors type: file processors: @@ -197,11 +197,11 @@ inputs: fields: data: more - add_fields: - target: elastic + target: elastic_agent fields: - agent.id: agent-id - agent.snapshot: false - agent.version: 8.0.0 + id: agent-id + snapshot: false + version: 8.0.0 `, rule: &RuleList{ Rules: []Rule{ From a10dca7959a5c09391e853d6e8d3e45bbee0b10f Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Tue, 20 Oct 2020 10:32:11 -0600 Subject: [PATCH 16/40] [Filebeat] Add max_number_of_messages config parameter for S3 input (#21993) --- CHANGELOG.next.asciidoc | 1 + .../docs/inputs/input-aws-s3.asciidoc | 89 ++++++++++--------- x-pack/filebeat/input/s3/collector.go | 18 ++-- x-pack/filebeat/input/s3/config.go | 22 +++-- x-pack/filebeat/input/s3/input.go | 2 +- 5 files 
changed, 68 insertions(+), 64 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index fa8d1fc2791..f2750175969 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -634,6 +634,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Adding support for Microsoft 365 Defender (Microsoft Threat Protection) {pull}21446[21446] - Adding support for FIPS in s3 input {pull}21446[21446] - Add SSL option to checkpoint module {pull}19560[19560] +- Add max_number_of_messages config into s3 input. {pull}21993[21993] *Heartbeat* diff --git a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc index 5cbe4685cb8..3ea37b3c754 100644 --- a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc @@ -38,24 +38,32 @@ The `s3` input supports the following configuration options plus the <<{beatname_lc}-input-{type}-common-options>> described later. [float] -==== `queue_url` - -URL of the AWS SQS queue that messages will be received from. Required. - -[float] -==== `fips_enabled` - -Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. For example: `s3-fips.us-gov-east-1.amazonaws.com`. +==== `api_timeout` -[float] -==== `visibility_timeout` +The maximum duration of the AWS API call. If it exceeds the timeout, the AWS API +call will be interrupted. +The default AWS API call timeout for a message is 120 seconds. The minimum +is 0 seconds. The maximum is half of the visibility timeout value. -The duration that the received messages are hidden from subsequent -retrieve requests after being retrieved by a ReceiveMessage request. -This value needs to be a lot bigger than {beatname_uc} collection frequency so -if it took too long to read the s3 log, this sqs message will not be reprocessed. -The default visibility timeout for a message is 300 seconds. The minimum -is 0 seconds. The maximum is 12 hours. +["source","json"] +---- +{ + "Records": [ + { + "eventVersion": "1.07", + "eventTime": "2019-11-14T00:51:00Z", + "awsRegion": "us-east-1", + "eventID": "EXAMPLE8-9621-4d00-b913-beca2EXAMPLE", + }, + { + "eventVersion": "1.07", + "eventTime": "2019-11-14T00:52:00Z", + "awsRegion": "us-east-1", + "eventID": "EXAMPLEc-28be-486c-8928-49ce6EXAMPLE", + } + ] +} +---- [float] ==== `expand_event_list_from_field` @@ -93,40 +101,33 @@ file_selectors: - regex: '^AWSLogs/\d+/CloudTrail/' expand_event_list_from_field: 'Records' - regex: '^AWSLogs/\d+/CloudTrail-Digest' -``` ---- +[float] +==== `fips_enabled` + +Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. For example: `s3-fips.us-gov-east-1.amazonaws.com`. [float] -==== `api_timeout` +==== `max_number_of_messages` +The maximum number of messages to return. Amazon SQS never returns more messages +than this value (however, fewer messages might be returned). +Valid values: 1 to 10. Default: 5. -The maximum duration of AWS API can take. If it exceeds the timeout, AWS API -will be interrupted. -The default AWS API timeout for a message is 120 seconds. The minimum -is 0 seconds. The maximum is half of the visibility timeout value. +[float] +==== `queue_url` -["source","json"] ----- -{ - "Records": [ - { - "eventVersion": "1.07", - "eventTime": "2019-11-14T00:51:00Z", - "awsRegion": "us-east-1", - "eventID": "EXAMPLE8-9621-4d00-b913-beca2EXAMPLE", - ... 
- }, - { - "eventVersion": "1.07", - "eventTime": "2019-11-14T00:52:00Z", - "awsRegion": "us-east-1", - "eventID": "EXAMPLEc-28be-486c-8928-49ce6EXAMPLE", - ... - } - ] -} -``` ----- +URL of the AWS SQS queue that messages will be received from. Required. + +[float] +==== `visibility_timeout` + +The duration that the received messages are hidden from subsequent +retrieve requests after being retrieved by a ReceiveMessage request. +This value needs to be a lot bigger than {beatname_uc} collection frequency so +if it took too long to read the s3 log, this sqs message will not be reprocessed. +The default visibility timeout for a message is 300 seconds. The minimum +is 0 seconds. The maximum is 12 hours. [float] ==== `aws credentials` diff --git a/x-pack/filebeat/input/s3/collector.go b/x-pack/filebeat/input/s3/collector.go index 1b890513284..c3d3114c723 100644 --- a/x-pack/filebeat/input/s3/collector.go +++ b/x-pack/filebeat/input/s3/collector.go @@ -82,17 +82,11 @@ type s3Context struct { errC chan error } -var ( - // The maximum number of messages to return. Amazon SQS never returns more messages - // than this value (however, fewer messages might be returned). - maxNumberOfMessage uint8 = 10 - - // The duration (in seconds) for which the call waits for a message to arrive - // in the queue before returning. If a message is available, the call returns - // sooner than WaitTimeSeconds. If no messages are available and the wait time - // expires, the call returns successfully with an empty list of messages. - waitTimeSecond uint8 = 10 -) +// The duration (in seconds) for which the call waits for a message to arrive +// in the queue before returning. If a message is available, the call returns +// sooner than WaitTimeSeconds. If no messages are available and the wait time +// expires, the call returns successfully with an empty list of messages. 
+var waitTimeSecond uint8 = 10 func (c *s3Collector) run() { defer c.logger.Info("s3 input worker has stopped.") @@ -205,7 +199,7 @@ func (c *s3Collector) receiveMessage(svcSQS sqsiface.ClientAPI, visibilityTimeou &sqs.ReceiveMessageInput{ QueueUrl: &c.config.QueueURL, MessageAttributeNames: []string{"All"}, - MaxNumberOfMessages: awssdk.Int64(int64(maxNumberOfMessage)), + MaxNumberOfMessages: awssdk.Int64(int64(c.config.MaxNumberOfMessages)), VisibilityTimeout: &visibilityTimeout, WaitTimeSeconds: awssdk.Int64(int64(waitTimeSecond)), }) diff --git a/x-pack/filebeat/input/s3/config.go b/x-pack/filebeat/input/s3/config.go index cc3c5318289..6dc0746ce5f 100644 --- a/x-pack/filebeat/input/s3/config.go +++ b/x-pack/filebeat/input/s3/config.go @@ -13,13 +13,14 @@ import ( ) type config struct { + APITimeout time.Duration `config:"api_timeout"` + ExpandEventListFromField string `config:"expand_event_list_from_field"` + FileSelectors []FileSelectorCfg `config:"file_selectors"` + FipsEnabled bool `config:"fips_enabled"` + MaxNumberOfMessages int `config:"max_number_of_messages"` QueueURL string `config:"queue_url" validate:"nonzero,required"` VisibilityTimeout time.Duration `config:"visibility_timeout"` - FipsEnabled bool `config:"fips_enabled"` AwsConfig awscommon.ConfigAWS `config:",inline"` - ExpandEventListFromField string `config:"expand_event_list_from_field"` - APITimeout time.Duration `config:"api_timeout"` - FileSelectors []FileSelectorCfg `config:"file_selectors"` } // FileSelectorCfg defines type and configuration of FileSelectors @@ -31,9 +32,10 @@ type FileSelectorCfg struct { func defaultConfig() config { return config{ - VisibilityTimeout: 300 * time.Second, - APITimeout: 120 * time.Second, - FipsEnabled: false, + APITimeout: 120 * time.Second, + FipsEnabled: false, + MaxNumberOfMessages: 5, + VisibilityTimeout: 300 * time.Second, } } @@ -42,10 +44,12 @@ func (c *config) Validate() error { return fmt.Errorf("visibility timeout %v is not within the "+ "required range 0s to 12h", c.VisibilityTimeout) } + if c.APITimeout < 0 || c.APITimeout > c.VisibilityTimeout/2 { return fmt.Errorf("api timeout %v needs to be larger than"+ " 0s and smaller than half of the visibility timeout", c.APITimeout) } + for i := range c.FileSelectors { r, err := regexp.Compile(c.FileSelectors[i].RegexString) if err != nil { @@ -53,5 +57,9 @@ func (c *config) Validate() error { } c.FileSelectors[i].Regex = r } + + if c.MaxNumberOfMessages > 10 || c.MaxNumberOfMessages < 1 { + return fmt.Errorf(" max_number_of_messages %v needs to be between 1 and 10", c.MaxNumberOfMessages) + } return nil } diff --git a/x-pack/filebeat/input/s3/input.go b/x-pack/filebeat/input/s3/input.go index d76e5b8b728..36f160d759e 100644 --- a/x-pack/filebeat/input/s3/input.go +++ b/x-pack/filebeat/input/s3/input.go @@ -106,7 +106,7 @@ func (in *s3Input) createCollector(ctx v2.Context, pipeline beat.Pipeline) (*s3C } log.Debug("s3 service name = ", s3Servicename) - + log.Debug("s3 input config max_number_of_messages = ", in.config.MaxNumberOfMessages) return &s3Collector{ cancellation: ctxtool.FromCanceller(ctx.Cancelation), logger: log, From 5935293e6efa2bb3900fe31d58111f7e557e795a Mon Sep 17 00:00:00 2001 From: Luca Belluccini Date: Wed, 21 Oct 2020 02:22:16 +0200 Subject: [PATCH 17/40] [DOC] Add firewall as possible troubleshooting issue (#21743) * [DOC] Add firewall as possible troubleshooting issue In case a firewall closes long persistent connections between Beats & Logstash, errors such as `write tcp ... 
write: connection reset by peer` will be reported by a given Beat. This
documentation page should be useful to identify this kind of issue.

* Update shared-faq.asciidoc

Amend

* Update libbeat/docs/shared-faq.asciidoc

Co-authored-by: DeDe Morton

* Update libbeat/docs/shared-faq.asciidoc

Co-authored-by: DeDe Morton

* Update libbeat/docs/shared-faq.asciidoc

Co-authored-by: DeDe Morton

* Make title more descriptive

Co-authored-by: Luca Belluccini
Co-authored-by: DeDe Morton
---
 libbeat/docs/shared-faq.asciidoc | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/libbeat/docs/shared-faq.asciidoc b/libbeat/docs/shared-faq.asciidoc
index 9aa8c3442c1..d6c48b73aa9 100644
--- a/libbeat/docs/shared-faq.asciidoc
+++ b/libbeat/docs/shared-faq.asciidoc
@@ -54,6 +54,27 @@ connect to the Lumberjack input plugin. To learn how to install and update
 plugins, see {logstash-ref}/working-with-plugins.html[Working with plugins].
 endif::[]
 
+ifndef::no-output-logstash[]
+[[publishing-ls-fails-connection-reset-by-peer]]
+=== Publishing to {ls} fails with "connection reset by peer" message
+
+{beatname_uc} requires a persistent TCP connection to {ls}. If a firewall interferes
+with the connection, you might see errors like this:
+
+[source,shell]
+----------------------------------------------------------------------
+Failed to publish events caused by: write tcp ... write: connection reset by peer
+----------------------------------------------------------------------
+
+
+To solve the problem:
+
+* make sure the firewall is not closing connections between {beatname_uc} and {ls}, or
+* set the `ttl` value in the <<logstash-output,{ls} output>> to a value that's
+lower than the maximum time allowed by the firewall, and set `pipelining` to 0
+(pipelining cannot be enabled when `ttl` is used).
+endif::[]
+
 ifndef::no-output-logstash[]
 [[metadata-missing]]
 === @metadata is missing in {ls}
From 65df4e14ebacfd71fc24564385c5662cd8261786 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Wed, 21 Oct 2020 12:33:23 +0200
Subject: [PATCH 18/40] feat: package aliases for snapshots (#21960)

* feat: push aliases for docker images

* feat: build alias for snapshots

* fix: only update alias on snapshots

Co-authored-by: Jaime Soriano Pastor

* fix: wrong image name for alias

* fix: reuse variable as groovy does not hide variables by scope

* chore: extract common logic to a method

* Revert "fix: only update alias on snapshots"

This reverts commit cff2cef82cb107bfddeca5caf225a9307db72135.

* Revert "feat: build alias for snapshots"

This reverts commit 707e0d71556553b15388adec0c7118ff89210ac9.
* chore: do not push aliases for PRs Co-authored-by: Jaime Soriano Pastor --- .ci/packaging.groovy | 56 ++++++++++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 20 deletions(-) diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index 8936de2fb3e..91902595a3c 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -191,10 +191,14 @@ def pushCIDockerImages(){ } } -def tagAndPush(name){ +def tagAndPush(beatName){ def libbetaVer = sh(label: 'Get libbeat version', script: 'grep defaultBeatVersion ${BASE_DIR}/libbeat/version/version.go|cut -d "=" -f 2|tr -d \\"', returnStdout: true)?.trim() + def aliasVersion = "" if("${env.SNAPSHOT}" == "true"){ + aliasVersion = libbetaVer.substring(0, libbetaVer.lastIndexOf(".")) // remove third number in version + libbetaVer += "-SNAPSHOT" + aliasVersion += "-SNAPSHOT" } def tagName = "${libbetaVer}" @@ -207,25 +211,37 @@ def tagAndPush(name){ // supported image flavours def variants = ["", "-oss", "-ubi8"] variants.each { variant -> - def oldName = "${DOCKER_REGISTRY}/beats/${name}${variant}:${libbetaVer}" - def newName = "${DOCKER_REGISTRY}/observability-ci/${name}${variant}:${tagName}" - def commitName = "${DOCKER_REGISTRY}/observability-ci/${name}${variant}:${env.GIT_BASE_COMMIT}" - - def iterations = 0 - retryWithSleep(retries: 3, seconds: 5, backoff: true) { - iterations++ - def status = sh(label:'Change tag and push', script: """ - docker tag ${oldName} ${newName} - docker push ${newName} - docker tag ${oldName} ${commitName} - docker push ${commitName} - """, returnStatus: true) - - if ( status > 0 && iterations < 3) { - error('tag and push failed, retry') - } else if ( status > 0 ) { - log(level: 'WARN', text: "${name} doesn't have ${variant} docker images. See https://github.com/elastic/beats/pull/21621") - } + doTagAndPush(beatName, variant, libbetaVer, tagName) + doTagAndPush(beatName, variant, libbetaVer, "${env.GIT_BASE_COMMIT}") + + if (!isPR() && aliasVersion != "") { + doTagAndPush(beatName, variant, libbetaVer, aliasVersion) + } + } +} + +/** +* @param beatName name of the Beat +* @param variant name of the variant used to build the docker image name +* @param sourceTag tag to be used as source for the docker tag command, usually under the 'beats' namespace +* @param targetTag tag to be used as target for the docker tag command, usually under the 'observability-ci' namespace +*/ +def doTagAndPush(beatName, variant, sourceTag, targetTag) { + def sourceName = "${DOCKER_REGISTRY}/beats/${beatName}${variant}:${sourceTag}" + def targetName = "${DOCKER_REGISTRY}/observability-ci/${beatName}${variant}:${targetTag}" + + def iterations = 0 + retryWithSleep(retries: 3, seconds: 5, backoff: true) { + iterations++ + def status = sh(label: "Change tag and push ${targetName}", script: """ + docker tag ${sourceName} ${targetName} + docker push ${targetName} + """, returnStatus: true) + + if ( status > 0 && iterations < 3) { + error("tag and push failed for ${beatName}, retry") + } else if ( status > 0 ) { + log(level: 'WARN', text: "${beatName} doesn't have ${variant} docker images. See https://github.com/elastic/beats/pull/21621") } } } From bb50d32ead2945b5c982e7975ab6ef6b3625860a Mon Sep 17 00:00:00 2001 From: William Deurwaarder Date: Wed, 21 Oct 2020 14:45:12 +0200 Subject: [PATCH 19/40] Prevent log input from sending duplicate messages due to file renaming (#21911) Input:Log: Reset TTL of registry state when a file is renamed. 
In some rare cases the registry state is marked for removal (TTL is set to 0) while the file is only renamed. Log detects the renaming of the file and updates the name of the file. As the file still exists it should also update the TTL of the renamed file. --- filebeat/input/log/input.go | 1 + 1 file changed, 1 insertion(+) diff --git a/filebeat/input/log/input.go b/filebeat/input/log/input.go index 365da416ed3..1b203adcf5e 100644 --- a/filebeat/input/log/input.go +++ b/filebeat/input/log/input.go @@ -566,6 +566,7 @@ func (p *Input) harvestExistingFile(newState file.State, oldState file.State) { logp.Debug("input", "Updating state for renamed file: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset) // Update state because of file rotation oldState.Source = newState.Source + oldState.TTL = newState.TTL err := p.updateState(oldState) if err != nil { logp.Err("File rotation state update error: %s", err) From 374467e49016706dfdd927e04b5ea8a86cebdc66 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Wed, 21 Oct 2020 14:56:44 +0200 Subject: [PATCH 20/40] [Ingest Manager] Use ML_SYSTEM to detect if agent is running as a service (#21884) [Ingest Manager] Use ML_SYSTEM to detect if agent is running as a service (#21884) --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + x-pack/elastic-agent/pkg/agent/install/svc_windows.go | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 3882ba19712..7088904a820 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -17,6 +17,7 @@ - Fix issue where inputs without processors defined would panic {pull}21628[21628] - Prevent reporting ecs version twice {pull}21616[21616] - Partial extracted beat result in failure to spawn beat {issue}21718[21718] +- Use ML_SYSTEM to detect if agent is running as a service {pull}21884[21884] - Use local temp instead of system one {pull}21883[21883] - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] - Fix issue with named pipes on Windows 7 {pull}21931[21931] diff --git a/x-pack/elastic-agent/pkg/agent/install/svc_windows.go b/x-pack/elastic-agent/pkg/agent/install/svc_windows.go index 9084f3b5ea7..a60aadb5494 100644 --- a/x-pack/elastic-agent/pkg/agent/install/svc_windows.go +++ b/x-pack/elastic-agent/pkg/agent/install/svc_windows.go @@ -10,10 +10,14 @@ import ( "golang.org/x/sys/windows" ) +const ( + ML_SYSTEM_RID = 0x4000 +) + // RunningUnderSupervisor returns true when executing Agent is running under // the supervisor processes of the OS. 
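//
// A minimal sketch of the membership test this SID enables, assuming only the
// golang.org/x/sys/windows package imported above (the names sid and member are
// illustrative; the elided body of the function below may differ):
//
//	sid, err := allocSid(ML_SYSTEM_RID)
//	if err == nil {
//		member, _ := windows.GetCurrentProcessToken().IsMember(sid)
//		_ = member // true when the process token carries the system integrity level
//	}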
func RunningUnderSupervisor() bool { - serviceSid, err := allocSid(windows.SECURITY_SERVICE_RID) + serviceSid, err := allocSid(ML_SYSTEM_RID) if err != nil { return false } @@ -40,7 +44,7 @@ func RunningUnderSupervisor() bool { func allocSid(subAuth0 uint32) (*windows.SID, error) { var sid *windows.SID - err := windows.AllocateAndInitializeSid(&windows.SECURITY_NT_AUTHORITY, + err := windows.AllocateAndInitializeSid(&windows.SECURITY_MANDATORY_LABEL_AUTHORITY, 1, subAuth0, 0, 0, 0, 0, 0, 0, 0, &sid) if err != nil { return nil, err From fc007701ecc42f7c6dc0e11762029944539fe1b2 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Wed, 21 Oct 2020 11:26:19 -0700 Subject: [PATCH 21/40] Fix typo (#19585) (#22061) Co-authored-by: Byungjin Park (BJ) --- heartbeat/docs/monitors/monitor-http.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/heartbeat/docs/monitors/monitor-http.asciidoc b/heartbeat/docs/monitors/monitor-http.asciidoc index ea981ea62b7..33d29dec89a 100644 --- a/heartbeat/docs/monitors/monitor-http.asciidoc +++ b/heartbeat/docs/monitors/monitor-http.asciidoc @@ -161,7 +161,7 @@ Under `check.response`, specify these options: *`status`*:: A list of expected status codes. 4xx and 5xx codes are considered `down` by default. Other codes are considered `up`. *`headers`*:: The required response headers. -*`body`*:: A list of regular expressions to match the the body output. Only a single expression needs to match. HTTP response +*`body`*:: A list of regular expressions to match the body output. Only a single expression needs to match. HTTP response bodies of up to 100MiB are supported. Example configuration: From ba2b2f935f1c6badc316f62417d87d630991ad2f Mon Sep 17 00:00:00 2001 From: Alex K <8418476+fearful-symmetry@users.noreply.github.com> Date: Wed, 21 Oct 2020 11:43:43 -0700 Subject: [PATCH 22/40] revert WSS process reporting for windows (#22055) * revert WSS process reporting for windows * add changelog --- CHANGELOG.next.asciidoc | 1 + metricbeat/module/system/process/process.go | 15 +-------------- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index f2750175969..059bbdb1cf6 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -378,6 +378,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix azure storage event format. {pull}21845[21845] - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] - [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] +- Revert change to report `process.memory.rss` as `process.memory.wss` on Windows. {pull}22055[22055] *Packetbeat* diff --git a/metricbeat/module/system/process/process.go b/metricbeat/module/system/process/process.go index c99ffaa1123..141a4a3a62d 100644 --- a/metricbeat/module/system/process/process.go +++ b/metricbeat/module/system/process/process.go @@ -150,24 +150,11 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { rootFields.Put("process.args", args) } - // This is a temporary fix until we make these changes global across libbeat - // This logic should happen in libbeat getProcessEvent() - - // There's some more Windows memory quirks we need to deal with. - // "rss" is a linux concept, but "wss" is a direct match on Windows. - // "share" is also unavailable on Windows. + // "share" is unavailable on Windows. 
 	if runtime.GOOS == "windows" {
 		proc.Delete("memory.share")
 	}
 
-	if m.IsAgent {
-		if runtime.GOOS == "windows" {
-			if setSize := getAndRemove(proc, "memory.rss"); setSize != nil {
-				proc.Put("memory.wss", setSize)
-			}
-		}
-	}
-
 	e := mb.Event{
 		RootFields:      rootFields,
 		MetricSetFields: proc,
From 215f49cf50a079d5f0963eeab3d1336d897c36f9 Mon Sep 17 00:00:00 2001
From: Ichinose Shogo
Date: Thu, 22 Oct 2020 17:03:50 +0900
Subject: [PATCH 23/40] Fix the url of reviewdog (#21981)

---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index c21c0a7346e..d64bb07776b 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,7 @@ GOLINT=golint
 GOLINT_REPO=golang.org/x/lint/golint
 REVIEWDOG=reviewdog
 REVIEWDOG_OPTIONS?=-diff "git diff master"
-REVIEWDOG_REPO=github.com/haya14busa/reviewdog/cmd/reviewdog
+REVIEWDOG_REPO=github.com/reviewdog/reviewdog/cmd/reviewdog
 XPACK_SUFFIX=x-pack/
 # PROJECTS_XPACK_PKG is a list of Beats that have independent packaging support
From 69cddaa1a0979a65c0bd8e3362ad69f5f9125652 Mon Sep 17 00:00:00 2001
From: Victor Martinez
Date: Thu, 22 Oct 2020 11:38:09 +0100
Subject: [PATCH 24/40] [build][packaging] Add resilience when docker build (#22050)

---
 dev-tools/mage/dockerbuilder.go | 9 ++++++++-
 x-pack/elastic-agent/magefile.go | 13 +++++++++++--
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/dev-tools/mage/dockerbuilder.go b/dev-tools/mage/dockerbuilder.go
index 90a99434884..503fcae9cfc 100644
--- a/dev-tools/mage/dockerbuilder.go
+++ b/dev-tools/mage/dockerbuilder.go
@@ -26,6 +26,7 @@ import (
 	"os/exec"
 	"path/filepath"
 	"strings"
+	"time"
 
 	"github.com/magefile/mage/sh"
 	"github.com/pkg/errors"
@@ -71,7 +72,13 @@ func (b *dockerBuilder) Build() error {
 	tag, err := b.dockerBuild()
 	if err != nil {
-		return errors.Wrap(err, "failed to build docker")
+		fmt.Println(">> Building docker images again (after 10 seconds)")
+		// This sleep is to avoid hitting the docker build issues when resources are not available.
+		time.Sleep(10 * time.Second)
+		tag, err = b.dockerBuild()
+		if err != nil {
+			return errors.Wrap(err, "failed to build docker")
+		}
 	}
 
 	if err := b.dockerSave(tag); err != nil {
diff --git a/x-pack/elastic-agent/magefile.go b/x-pack/elastic-agent/magefile.go
index a1aaba840fb..fad5ef935aa 100644
--- a/x-pack/elastic-agent/magefile.go
+++ b/x-pack/elastic-agent/magefile.go
@@ -513,8 +513,13 @@ func runAgent(env map[string]string) error {
 	}
 
 	// build docker image
-	if err := sh.Run("docker", "build", "-t", tag, "."); err != nil {
-		return err
+	if err := dockerBuild(tag); err != nil {
+		fmt.Println(">> Building docker images again (after 10 seconds)")
+		// This sleep is to avoid hitting the docker build issues when resources are not available.
+		time.Sleep(10 * time.Second)
+		if err := dockerBuild(tag); err != nil {
+			return err
+		}
 	}
 }

@@ -625,6 +630,10 @@ func copyAll(from, to string) error {
 	})
 }

+func dockerBuild(tag string) error {
+	return sh.Run("docker", "build", "-t", tag, ".")
+}
+
 func dockerTag() string {
 	const commitLen = 7
 	tagBase := "elastic-agent"
From 5553dc24d26e0a12119083a39df0a904dbb7e2d9 Mon Sep 17 00:00:00 2001
From: Ivan Fernandez Calvo
Date: Thu, 22 Oct 2020 13:23:38 +0200
Subject: [PATCH 25/40] docs: Prepare Changelog for 6.8.13 (#22072) (#22079)

* docs: Close changelog for 6.8.13

* Apply suggestions from code review

Co-authored-by: kuisathaverat

Co-authored-by: Ivan Fernandez Calvo
# Conflicts:
#	CHANGELOG.asciidoc
#	libbeat/docs/release.asciidoc

Co-authored-by: Elastic Machine
---
 CHANGELOG.asciidoc            | 40 +++++++++++++++++++++++++++++++++++
 libbeat/docs/release.asciidoc |  4 ++++
 2 files changed, 44 insertions(+)

diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc
index 1dfbb2fb889..5c364aeae64 100644
--- a/CHANGELOG.asciidoc
+++ b/CHANGELOG.asciidoc
@@ -2575,6 +2575,46 @@ https://github.com/elastic/beats/compare/v6.5.0...v7.0.0-alpha1[View commits]
 - Added support to calculate certificates' fingerprints (MD5, SHA-1, SHA-256). {issue}8180[8180]
 - Support new TLS version negotiation introduced in TLS 1.3. {issue}8647[8647].
 
+[[release-notes-6.8.13]]
+=== Beats version 6.8.13
+https://github.com/elastic/beats/compare/v6.8.12...v6.8.13[View commits]
+
+==== Added
+
+*Filebeat*
+
+- Add container image in Kubernetes metadata {pull}13356[13356] {issue}12688[12688]
+
+[[release-notes-6.8.12]]
+=== Beats version 6.8.12
+https://github.com/elastic/beats/compare/v6.8.11...v6.8.12[View commits]
+
+==== Bugfixes
+
+*Filebeat*
+
+- Fix Filebeat OOMs on very long lines {issue}19500[19500], {pull}19552[19552]
+
+[[release-notes-6.8.11]]
+=== Beats version 6.8.11
+https://github.com/elastic/beats/compare/v6.8.10...v6.8.11[View commits]
+
+==== Bugfixes
+
+*Metricbeat*
+
+- Fix bug incorrect parsing of float numbers as integers in Couchbase module {issue}18949[18949] {pull}19055[19055]
+
+[[release-notes-6.8.10]]
+=== Beats version 6.8.10
+https://github.com/elastic/beats/compare/v6.8.9...v6.8.10[View commits]
+
+==== Bugfixes
+
+*Affecting all Beats*
+
+- Fix `add_cloud_metadata` to better support modifying sub-fields with other processors. {pull}13808[13808]
+
 [[release-notes-6.8.9]]
 === Beats version 6.8.9
 https://github.com/elastic/beats/compare/v6.8.8...v6.8.9[View commits]
diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc
index 90dd214787a..caf94c3bf2d 100644
--- a/libbeat/docs/release.asciidoc
+++ b/libbeat/docs/release.asciidoc
@@ -39,6 +39,10 @@ upgrade.
 * <>
 * <>
 * <>
+* <<release-notes-6.8.13>>
+* <<release-notes-6.8.12>>
+* <<release-notes-6.8.11>>
+* <<release-notes-6.8.10>>
 * <>
 * <>
 * <>
From 82c5855d965722f281ad611d11c01898084512bb Mon Sep 17 00:00:00 2001
From: Ivan Fernandez Calvo
Date: Thu, 22 Oct 2020 13:23:51 +0200
Subject: [PATCH 26/40] docs: Prepare Changelog for 7.9.3 (#22073) (#22075)

* docs: Close changelog for 7.9.3

* Apply suggestions from code review

Co-authored-by: kuisathaverat

Co-authored-by: Ivan Fernandez Calvo
Co-authored-by: Elastic Machine
---
 CHANGELOG.asciidoc            | 24 ++++++++++++++++++++++++
 CHANGELOG.next.asciidoc       |  4 ++++
 libbeat/docs/release.asciidoc |  1 +
 3 files changed, 29 insertions(+)

diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc
index 5c364aeae64..349eb49edb3 100644
--- a/CHANGELOG.asciidoc
+++ b/CHANGELOG.asciidoc
@@ -3,6 +3,31 @@
 :issue: https://github.com/elastic/beats/issues/
 :pull: https://github.com/elastic/beats/pull/
 
+[[release-notes-7.9.3]]
+=== Beats version 7.9.3
+https://github.com/elastic/beats/compare/v7.9.2...v7.9.3[View commits]
+
+==== Bugfixes
+
+*Affecting all Beats*
+
+- The `o365input` and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21258[21258]
+
+*Auditbeat*
+
+- system/socket: Fixed a crash due to concurrent map read and write. {issue}21192[21192] {pull}21690[21690]
+
+*Filebeat*
+
+- Add field limit check for AWS Cloudtrail flattened fields. {pull}21388[21388] {issue}21382[21382]
+
+*Metricbeat*
+
+- Fix remote_write flaky test. {pull}21173[21173]
+- Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880]
+- [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009]
+
+
 [[release-notes-7.9.2]]
 === Beats version 7.9.2
 https://github.com/elastic/beats/compare/v7.9.1...v7.9.2[View commits]
diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 059bbdb1cf6..a2dcaa48f2d 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -379,6 +379,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880]
 - [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009]
 - Revert change to report `process.memory.rss` as `process.memory.wss` on Windows. {pull}22055[22055]
+- Add a switch to the driver definition on SQL module to use pretty names {pull}17378[17378]
 
 *Packetbeat*
 
@@ -821,3 +822,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 ==== Known Issue
 
 *Journalbeat*
+
+
+
diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc
index caf94c3bf2d..724d8af03c3 100644
--- a/libbeat/docs/release.asciidoc
+++ b/libbeat/docs/release.asciidoc
@@ -8,6 +8,7 @@
 This section summarizes the changes in each release. Also read
 <> for more detail about changes that affect
 upgrade.
+* <<release-notes-7.9.3>>
 * <>
 * <>
 * <>
From fb6d8ef3b7e3fb13af5cbd73220ac785aa50dead Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Thu, 22 Oct 2020 15:34:38 +0200
Subject: [PATCH 27/40] chore: use ubuntu 18 as linux agent (#22084)

---
 .ci/packaging.groovy | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy
index 91902595a3c..073c977a22e 100644
--- a/.ci/packaging.groovy
+++ b/.ci/packaging.groovy
@@ -43,7 +43,7 @@ pipeline {
   }
   stages {
     stage('Filter build') {
-      agent { label 'ubuntu && immutable' }
+      agent { label 'ubuntu-18 && immutable' }
       when {
         beforeAgent true
         anyOf {
@@ -98,7 +98,7 @@ pipeline {
       }
       stages {
         stage('Package Linux'){
-          agent { label 'ubuntu && immutable' }
+          agent { label 'ubuntu-18 && immutable' }
          options { skipDefaultCheckout() }
          when {
            beforeAgent true
@@ -160,7 +160,7 @@ pipeline {
       }
     }
     stage('Run E2E Tests for Packages'){
-      agent { label 'ubuntu && immutable' }
+      agent { label 'ubuntu-18 && immutable' }
      options { skipDefaultCheckout() }
      steps {
        runE2ETests()
From 9aefcfe692961b5cc309ad888d5960e83a3c25f8 Mon Sep 17 00:00:00 2001
From: Michal Pristas
Date: Thu, 22 Oct 2020 17:10:20 +0200
Subject: [PATCH 28/40] [Ingest Manager] Use symlink path for reexecutions
 (#21835)

[Ingest Manager] Use symlink path for reexecutions (#21835)
---
 x-pack/elastic-agent/CHANGELOG.next.asciidoc |  1 +
 x-pack/elastic-agent/pkg/agent/cmd/run.go    | 20 +++++++++++++++++++-
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc
index 7088904a820..b6a870e0259 100644
--- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc
+++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc
@@ -17,6 +17,7 @@
 - Fix issue where inputs without processors defined would panic {pull}21628[21628]
 - Prevent reporting ecs version twice {pull}21616[21616]
 - Partial extracted beat result in failure to spawn beat {issue}21718[21718]
+- Use symlink path for reexecutions {pull}21835[21835]
 - Use ML_SYSTEM to detect if agent is running as a service {pull}21884[21884]
 - Use local temp instead of system one {pull}21883[21883]
 - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932]
 - Fix issue with named pipes on Windows 7 {pull}21931[21931]
diff --git a/x-pack/elastic-agent/pkg/agent/cmd/run.go b/x-pack/elastic-agent/pkg/agent/cmd/run.go
index 84dd8bd8a9a..b014cd69084 100644
--- a/x-pack/elastic-agent/pkg/agent/cmd/run.go
+++ b/x-pack/elastic-agent/pkg/agent/cmd/run.go
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"os"
 	"os/signal"
+	"path/filepath"
 	"syscall"
 
 	"github.com/spf13/cobra"
@@ -26,6 +27,10 @@ import (
 	"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release"
 )
 
+const (
+	agentName = "elastic-agent"
+)
+
 func newRunCommandWithArgs(flags *globalFlags, _ []string, streams *cli.IOStreams) *cobra.Command {
 	return &cobra.Command{
 		Use:   "run",
@@ -87,7 +92,7 @@ func run(flags *globalFlags, streams *cli.IOStreams) error { // Windows: Mark se
Elastic Agent will not verify signatures of used artifacts.") } - execPath, err := os.Executable() + execPath, err := reexecPath() if err != nil { return err } @@ -146,3 +151,16 @@ func run(flags *globalFlags, streams *cli.IOStreams) error { // Windows: Mark se rex.ShutdownComplete() return err } + +func reexecPath() (string, error) { + // set executable path to symlink instead of binary + // in case of updated symlinks we should spin up new agent + potentialReexec := filepath.Join(paths.Top(), agentName) + + // in case it does not exists fallback to executable + if _, err := os.Stat(potentialReexec); os.IsNotExist(err) { + return os.Executable() + } + + return potentialReexec, nil +} From daed8f9361d6c2708d84d3764a5c9ae52b042238 Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Thu, 22 Oct 2020 11:49:26 -0400 Subject: [PATCH 29/40] Remove suricata.eve.timestamp alias (#22095) Remove the suricata.eve.timestamp alias field from the Suricata module. This is a breaking change for anything that we dependent upon the field, but its presence caused issue in Kibana since it was always displayed in Discover. Fixes #10535 --- CHANGELOG.next.asciidoc | 1 + filebeat/docs/fields.asciidoc | 9 --------- x-pack/filebeat/module/suricata/eve/_meta/fields.yml | 4 ---- x-pack/filebeat/module/suricata/fields.go | 2 +- 4 files changed, 2 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index a2dcaa48f2d..1bf2cc8f762 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -80,6 +80,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add support for GMT timezone offsets in `decode_cef`. {pull}20993[20993] - Fix parsing of Elasticsearch node name by `elasticsearch/slowlog` fileset. {pull}14547[14547] - API address and shard ID are required settings in the Cloud Foundry input. {pull}21759[21759] +- Remove `suricata.eve.timestamp` alias field. {issue}10535[10535] {pull}22095[22095] *Heartbeat* diff --git a/filebeat/docs/fields.asciidoc b/filebeat/docs/fields.asciidoc index b4f6a158ad7..b1ee49fed5c 100644 --- a/filebeat/docs/fields.asciidoc +++ b/filebeat/docs/fields.asciidoc @@ -143952,15 +143952,6 @@ type: keyword -- -*`suricata.eve.timestamp`*:: -+ --- -type: alias - -alias to: @timestamp - --- - *`suricata.eve.in_iface`*:: + -- diff --git a/x-pack/filebeat/module/suricata/eve/_meta/fields.yml b/x-pack/filebeat/module/suricata/eve/_meta/fields.yml index 45980b888b0..dffb86e7ebe 100644 --- a/x-pack/filebeat/module/suricata/eve/_meta/fields.yml +++ b/x-pack/filebeat/module/suricata/eve/_meta/fields.yml @@ -176,10 +176,6 @@ - name: http_content_type type: keyword - - name: timestamp - type: alias - path: '@timestamp' - - name: in_iface type: keyword diff --git a/x-pack/filebeat/module/suricata/fields.go b/x-pack/filebeat/module/suricata/fields.go index 4cba3d5ee74..105704a1cc3 100644 --- a/x-pack/filebeat/module/suricata/fields.go +++ b/x-pack/filebeat/module/suricata/fields.go @@ -19,5 +19,5 @@ func init() { // AssetSuricata returns asset data. // This is the base64 encoded gzipped contents of module/suricata. 
func AssetSuricata() string { - return "eJzsXEuP67YV3t9fod2sYjRpExSzKLpJFwXaLgJ0SxyTRxJjvi5J2eP++kKyxyNbpCw+MEWTO6uBZ/jxvHhePPR3zQHPr40bLKfg4UvTeO4Fvja/fHzC0FHLjedavTZ/+dI0TfMPzQaBTatt04Nigquu8T02P//75+bvv/zrn43QnWuM1WygyJr9+Ya3+9I0LUfB3OuE9F2jQOIdBeOPPxt8bTqrB3P9JEDF+PO3CatprZYTBe/7TKQI3TUtF7i7/vt84/nmeMTbZ6G9V/af0YBvRlt/YXchjNmCRyoeKFGejBTc/fmdqAOeT9qy29+CGGAMMVZ7TbTlXT6Opya4+FEyMZ7ukEgr4E4Km8n5gDGujwDstRYI6hnAjQ7iaRkpQA9lpLizKgTw4B+NJJGJmTgKNWOdL+Om5Vni+FgukKtWV7JX18P3ZQIZCRp/i6CA4LAUuQHfX5buxl+fqu+NcBbZQGjVfYIFOa8txmjYqPoOTMz6th6FHn748acyTiT7sVAU/D/Z2r5bGzRwTqWJh4W4tt/XM3SejOEpuD5E4IW8cR1XMEa93d3y4C7O0pxNnB4sxQ34U1RLBFfoT9oedt6Cchu2oGAIVWEWnst5lAAPh82n/M/WhXWoHpdnx+OlIaU6fFsDY8VDbsVgHzljJkSpG/VeFKy2VLNCGWRQfwtTQp+W3CfkiCiBP7KfHXU9+CErEbnzcsnHb+7jnp3ByQ8HVLZV4L33tZLqVXGthZyRhp1FZ7RyuLvA3PMUs1Vk3CKNpXobzXXcngwOLYEOF452CwMfi3djhcMViGebTpGD6thJTSHdYos2X+xfB3R+N4HYGU5kx8HGSF4VkBWbJdNr5zMz1XEbpiXwpxmaQNX5WPG23Vb3mp13+7NHt0lTEn2vY85xs6oeUNY2pFr5SP2eVnlzic6DTPVkL3+9rXx54scU4S3Qki6DwEiil+7KKHjstD0XZuB4RMt9DGVN4VPbZbcAiPrBY0HE78qKNt4p8IMtzBmAjuEuW1J68FTLpwHjRmxOhnUDWXR9ss1M8HjACcHFAB/iCjmidSFxblTKjFfd+hNYLEX8OBL2GA1WvyGO5zlRrSKJglk5Z3nCO6BVKIgBesBAz22LI1iAMasDDZQsKN7mg90yFjPGoAIPx9DHc8w8sYdiVRpRgby9jCSJksJyWZraJMrBLWWdwljLBZKpjVeVPW1QkRG7zJgYthYerzAKBQ9vZAQlPS8+gdwc/xSFCJMXJ/A+/dNDkLpt9M3Ua6GTqKpgWQTnUO5FoOO7FW0muWXD9pvknh9XoU9Vj4OnY0Wb60Vmbp9VcGblHhEl2o5IzZCg8hi6nMgG1KFUKg1teaWaisCpXDs6CSBxz7UNxBkIBI1PD2AhkRbltw4HpkkLPHxak0TkFLmY9ZSllWpNObSeMPAw2aIAM5FZSiS6MfEujYUXuZXSclahm/V8OSltJYgqYgpc2H+asc+ZO4LgjNAe6cENslj5U5CraKVXy6yjQ8Gdr6G80HxCoqC8RZCEofE9sQi0L/YOt6zgTKqYxgyvK1bAu4NhvG1J8JItDU9pEsxd0qoBqlnldgYcO2IOngSv79NYvG9M52GUJwgW8oQ88+SmlIajAEW+cvW1EOdFDUK8FIIIP6JEQfKLjUG5wVxG/sId/62UzkR/8MRrTZyEIMlZsaJQfl1xlrdaJleoFzea9aFGvf8T4YpUoIabtYOab5Tv+QE3Kx3VZH+g//flIVtcVjUVxFX3wI1+rzTPoIFGY2KeCW+1QtpUQZcm9dKI0oPnVhz4/1vdjNaZYjNB36NVWJrVjp65lkNDxD//4YfvYXnNn5TjBQrR8j476YTeQ6kNXbHCc7Of37kX+kRkVzcdtvrkyH5wy0v0NP5G4hy5tmOrYCldB23i0B24McUFHBXaISPGDqoYS+GpDtBFWhalPhZj7c9mLCkrsTiNINdj8WoOhKvy0vmCOPVUysv6EUrCm8BSJ28rE4XSBKZjUnwOGEMEnCvX4KuNgYLLoGDakXwPFMzHUlGC98cZxExdaRLOplPBmHIVkcKdi1QkJ2vKqQ5N0uXXUGtjTDkoXAY7e8kKo2gNraX9C1gtA9hno9wO/ts3b/I7dwB1Tttv75jErTl+PtbH5JT2e2zj80NsXixFMLhzA1oWm0fdOuyoeBlArGGWRsXlOpVYdIMsfRvZctWhNZZHx1a3D4LyQP2bhKG0h9ZHc78tinbD/tfiByS/wh/TegQMWxiEJ5Mhj8mBCBQMzyOA85ar5TBanP4lRA+rfmm7AH6H/M9rkMsQcmTUdxNKoObI9YDTvR/xenW8fG2mfv4AbdPrFuchMM/6fKPrM4e71ZEtzMFX4+lx3DmyJXQlY8MVXrO/K3J1an6N6etD3k06tAiuNNZclVRK70b9oIpFsy2uf5rBzouHi5MfPLjPn4AvHgGGU6PAe+CtadXMNz1+wUiCbwrkwLm+yVLjyUJgiWYmgQvSWr0cNkqC6VFkEbIULr4ZpEtjSnL/y2+jeS7j/wYAAP//9F32EA==" + return 
"eJzsXEuP47gRvs+v0G1Pa2Q32UXQh9w2hwBJDgvkSpTJksQxX0NSdju/PpDsdssWKYsPdLAz06eGu/mxXqwXi/6xOeD5pXGD5RQ8fGoaz73Al+b3908YOmq58Vyrl+Zvn5qmaf6p2SCwabVtelBMcNU1vsfmt//81vzj93//qxG6c42xmg0UWbM/3/B2n5qm5SiYe5mQfmwUSLyjYPzxZ4MvTWf1YK6fBKgYf/4+YTWt1XKi4G2fiRShu6blAnfXf59vPN8cj3j7LLT3yv4zGvDVaOsv7C6EMVvwSMUDJcqTkYK7P78RdcDzSVt2+1sQA4whxmqviba8y8fx1AQXP0omxtMdEmkF3ElhMznvMMb1EYC91gJBPQO40UE8LSMF6KGMFHdWhQAe/KORJDIxE0ehZqzzZdy0PEsc78sFctXqSvbqevipTCAjQeNvERQQHJYiN+D7y9Ld+OtT9b0SziIbCK26D7Ag57XFGA0bVd+BiVnf1qPQw8+//FrGiWS/FIqC/zdb23drgwbOqTTxsBDX9tt6hs6TMTwF14cIvJA3ruMKxqi3u1se3MVZmrOJ04OluAF/imqJ4Ar9SdvDzltQbsMWFAyhKszCczmPEuDhsPmU/9m6sA7V4/LseLw0pFSHb2tgrHjIrRjsPWfMhCh1o96LgtWWalYogwzqb2FK6NOS+4QcESXwR/azo64HP2QlIndeLvn4zX3cszM4+eGAyrYKvPe+VlK9Kq61kDPSsLPojFYOdxeYe55itoqMW6SxVG+juY7bk8GhJdDhwtFuYeB98W6scLgC8WzTKXJQHTupKaRbbNHmi/3LgM7vJhA7w4nsONgYyasCsmKzZHrtfGamOm7DtAT+NEMTqDofK9622+pes/Nuf/boNmlKou91zDluVtUDytqGVCsfqd+T/BhXhLdAS3oAAiNpWLqjoeCx0/ZcmB/jES33MZQ1dUxNkd0CIOqljgXxuCsrqXinwA+2MKIDHYNRtqT04KmWT935jdic/OcGsujJZJuZ4PFwEIKLAT54fXJE60Li3KiUGa+69SewWIr4fiTsMRpKviKO5xlLrRKGglk5Z3nCO6BVKIgBesBAR2yLI1iAMasD7Y0sKN7mg93yCeN5NNZvAWDo4xlgnthDsSqNqEBWXUaSRElhuSxNbRLl4JayTmGs5QLJ1GSryp42qMiIXWZMDFsLjxcMhYKHVzKCkp4Xn0Bujn+JQoTJixM4q/e5RD0EqdtG30y9FjqJqgqWRXAO5V4E+rFb0WaSW7ZTv0vu+XEV+lT1OHg61pu5XmTm9lkFZ1buEVGi7YjUDAkqj6Grg2xAHUql0tCWF56pCJzKtaOTABL3XNtAnIFA0PjwABYSaVF+63BgmrTAw6c1SUROkYtZT1laqdaUQ+sJAw+TLQowE5mlRKIbE+/SWHiRWyktZxW6986Xk9JWgqgipsB1+ocZ+5y5IwjOCO2RHtwgi5U/BbmKVnq1zDo6FNz5GsoLTQ8kCspbBEkYGt8Ti0D7Yu9wywrOpIppzPC6YgW8ORjG25YEr8DS8JQmwdwlrRqgmlVuZ8CxI+bgSfByPY3F+7ZxHkZ5gmAhT8gzT25KaTgKUOQLV18KcX5QgxA/FIIIP6JEQfKLjUG5wVwG8sL9+K2UzkR/8MRrTZyEIMlZsaJQfl1xlrdaJleoFzea9aFGvf8r4YpUoIabtYOab5Rv+QE3Kx3VZH+g///lIVtcVjUVxFX3wI1+rzTPoIFGY2KeCa+1QtpUQZcm9dKI0oPnVhz4H61uRutMsZmg79EqLM1qR89cy6Eh4l//9PNPsLyET8rxAoVoeZ+ddELvodSGrljhqdaP79wLfSKyq5sOW31yZD+45SV6Gn8jcY5c27FVsJSugzZx6A7cmOICjgrtkBFjB1WMpfBUB+giLYtSH4ux9mczlpSVWJwGhOuxeDUHwlV56XxBnHoq5WX9CCXhVWCpk7eViUJpAtMxKT4HjCECzpVr8NXGQMFlUDDtSL4HCuZjqSjB++MMYqauNAln06lgTLmKSOHORSqSkzXlVIcm6fJrqLUxphwULoOdvWSFUbSG1tL+BayWAeyzUW4H//W7N/nGHUCd0/b1HZO4NcfPx/qYnNJ+j218fojNi6UIBnduQMti86hbhx0VLwOINczSqLhcpxKLbpClLxdbrjq0xvLo2Or2QVAeqH+TMJT20Ppo7rdF0W7Yfy5+3vEZ/pzWI2DYwiA8mQx5TA5EoGB4HgGct1wth9Hi9C8helj1S9sF8A3yP69BLkPIkVHfTSiBmiPXA073fsTr1fHytZn6+fOwTW9PnIfAPOvzja7PHO5WR7YwB1+Np8dx58iW0JWMDVd4a/6myNWp+TWmr89sN+nQIrjSWHNVUim9G/WDKhbNtrj+aQY7Lx4uTn7w4D5/oL14ohdOjQKvdbemVTPf9Pj1Hwm+KZAD5/omS40nC4ElmpkELkhr9XLYKAmmR5FFyFK4+GqQLo0pyf0vvyvmuYz/FwAA//8GEN89" } From 5d077092d3e0aacfecae81ea307a3c6fda748705 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Thu, 22 Oct 2020 11:34:32 -0600 Subject: [PATCH 30/40] Add max_number_of_messages into aws filebeat fileset vars (#22057) --- x-pack/filebeat/filebeat.reference.yml | 36 +++++++++++++++++++ x-pack/filebeat/module/aws/_meta/config.yml | 36 +++++++++++++++++++ .../module/aws/cloudtrail/config/s3.yml | 4 +++ .../module/aws/cloudtrail/manifest.yml | 2 ++ .../module/aws/cloudwatch/config/s3.yml | 4 +++ .../module/aws/cloudwatch/manifest.yml | 2 ++ x-pack/filebeat/module/aws/ec2/config/s3.yml | 4 +++ x-pack/filebeat/module/aws/ec2/manifest.yml | 2 ++ x-pack/filebeat/module/aws/elb/config/s3.yml | 4 +++ x-pack/filebeat/module/aws/elb/manifest.yml | 2 ++ .../module/aws/s3access/config/s3.yml | 4 +++ .../filebeat/module/aws/s3access/manifest.yml | 2 ++ .../module/aws/vpcflow/config/input.yml | 4 +++ .../filebeat/module/aws/vpcflow/manifest.yml | 2 ++ x-pack/filebeat/modules.d/aws.yml.disabled | 36 +++++++++++++++++++ 15 files 
changed, 144 insertions(+) diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 49ede1c7d24..f10a46aa20e 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -142,6 +142,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + cloudwatch: enabled: false @@ -176,6 +182,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + ec2: enabled: false @@ -210,6 +222,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + elb: enabled: false @@ -244,6 +262,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + s3access: enabled: false @@ -278,6 +302,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + vpcflow: enabled: false @@ -312,6 +342,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + #-------------------------------- Azure Module -------------------------------- - module: azure # All logs diff --git a/x-pack/filebeat/module/aws/_meta/config.yml b/x-pack/filebeat/module/aws/_meta/config.yml index b7e0c25b674..e4b521e467f 100644 --- a/x-pack/filebeat/module/aws/_meta/config.yml +++ b/x-pack/filebeat/module/aws/_meta/config.yml @@ -45,6 +45,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
+ #var.max_number_of_messages: 5 + cloudwatch: enabled: false @@ -79,6 +85,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + ec2: enabled: false @@ -113,6 +125,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + elb: enabled: false @@ -147,6 +165,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + s3access: enabled: false @@ -181,6 +205,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + vpcflow: enabled: false @@ -214,3 +244,9 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
+ #var.max_number_of_messages: 5 diff --git a/x-pack/filebeat/module/aws/cloudtrail/config/s3.yml b/x-pack/filebeat/module/aws/cloudtrail/config/s3.yml index ac1caacf21c..d11da6c6a52 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/config/s3.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/config/s3.yml @@ -55,6 +55,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/cloudtrail/manifest.yml b/x-pack/filebeat/module/aws/cloudtrail/manifest.yml index 732967ff0b0..03c7acf1336 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/manifest.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/manifest.yml @@ -21,6 +21,8 @@ var: default: true - name: process_insight_logs default: true + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/cloudwatch/config/s3.yml b/x-pack/filebeat/module/aws/cloudwatch/config/s3.yml index bdb0ff350f0..7364f997a65 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/config/s3.yml +++ b/x-pack/filebeat/module/aws/cloudwatch/config/s3.yml @@ -41,6 +41,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/cloudwatch/manifest.yml b/x-pack/filebeat/module/aws/cloudwatch/manifest.yml index 2878c79936d..5d9931b2e40 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/manifest.yml +++ b/x-pack/filebeat/module/aws/cloudwatch/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/ec2/config/s3.yml b/x-pack/filebeat/module/aws/ec2/config/s3.yml index bdb0ff350f0..7364f997a65 100644 --- a/x-pack/filebeat/module/aws/ec2/config/s3.yml +++ b/x-pack/filebeat/module/aws/ec2/config/s3.yml @@ -41,6 +41,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/ec2/manifest.yml b/x-pack/filebeat/module/aws/ec2/manifest.yml index 2878c79936d..5d9931b2e40 100644 --- a/x-pack/filebeat/module/aws/ec2/manifest.yml +++ b/x-pack/filebeat/module/aws/ec2/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/elb/config/s3.yml b/x-pack/filebeat/module/aws/elb/config/s3.yml index bdb0ff350f0..7364f997a65 100644 --- a/x-pack/filebeat/module/aws/elb/config/s3.yml +++ b/x-pack/filebeat/module/aws/elb/config/s3.yml @@ -41,6 +41,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags 
"forwarded" }} diff --git a/x-pack/filebeat/module/aws/elb/manifest.yml b/x-pack/filebeat/module/aws/elb/manifest.yml index f823ccbacce..dc95f6abb7e 100644 --- a/x-pack/filebeat/module/aws/elb/manifest.yml +++ b/x-pack/filebeat/module/aws/elb/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/s3access/config/s3.yml b/x-pack/filebeat/module/aws/s3access/config/s3.yml index bdb0ff350f0..7364f997a65 100644 --- a/x-pack/filebeat/module/aws/s3access/config/s3.yml +++ b/x-pack/filebeat/module/aws/s3access/config/s3.yml @@ -41,6 +41,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/s3access/manifest.yml b/x-pack/filebeat/module/aws/s3access/manifest.yml index 2878c79936d..5d9931b2e40 100644 --- a/x-pack/filebeat/module/aws/s3access/manifest.yml +++ b/x-pack/filebeat/module/aws/s3access/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/vpcflow/config/input.yml b/x-pack/filebeat/module/aws/vpcflow/config/input.yml index 628196b7d3e..de4affbd694 100644 --- a/x-pack/filebeat/module/aws/vpcflow/config/input.yml +++ b/x-pack/filebeat/module/aws/vpcflow/config/input.yml @@ -43,6 +43,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + {{ else if eq .input "file" }} type: log diff --git a/x-pack/filebeat/module/aws/vpcflow/manifest.yml b/x-pack/filebeat/module/aws/vpcflow/manifest.yml index c7df14a4050..19f40c7a3f7 100644 --- a/x-pack/filebeat/module/aws/vpcflow/manifest.yml +++ b/x-pack/filebeat/module/aws/vpcflow/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/input.yml diff --git a/x-pack/filebeat/modules.d/aws.yml.disabled b/x-pack/filebeat/modules.d/aws.yml.disabled index 0fe8465211b..f3d2ac1f7c9 100644 --- a/x-pack/filebeat/modules.d/aws.yml.disabled +++ b/x-pack/filebeat/modules.d/aws.yml.disabled @@ -48,6 +48,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + cloudwatch: enabled: false @@ -82,6 +88,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
+    #var.max_number_of_messages: 5
+
   ec2:
     enabled: false
 
@@ -116,6 +128,12 @@
 
     # AWS IAM Role to assume
     #var.role_arn: arn:aws:iam::123456789012:role/test-mb
 
+    # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint.
+    #var.fips_enabled: false
+
+    # The maximum number of messages to return from SQS. Valid values: 1 to 10.
+    #var.max_number_of_messages: 5
+
   elb:
     enabled: false
 
@@ -150,6 +168,12 @@
 
     # AWS IAM Role to assume
     #var.role_arn: arn:aws:iam::123456789012:role/test-mb
 
+    # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint.
+    #var.fips_enabled: false
+
+    # The maximum number of messages to return from SQS. Valid values: 1 to 10.
+    #var.max_number_of_messages: 5
+
   s3access:
     enabled: false
 
@@ -184,6 +208,12 @@
 
     # AWS IAM Role to assume
     #var.role_arn: arn:aws:iam::123456789012:role/test-mb
 
+    # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint.
+    #var.fips_enabled: false
+
+    # The maximum number of messages to return from SQS. Valid values: 1 to 10.
+    #var.max_number_of_messages: 5
+
   vpcflow:
     enabled: false
 
@@ -217,3 +247,9 @@
 
     # AWS IAM Role to assume
     #var.role_arn: arn:aws:iam::123456789012:role/test-mb
+
+    # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint.
+    #var.fips_enabled: false
+
+    # The maximum number of messages to return from SQS. Valid values: 1 to 10.
+    #var.max_number_of_messages: 5

From cc2217ced1dd549dbbed0abbd048caac6150ecf7 Mon Sep 17 00:00:00 2001
From: kaiyan-sheng
Date: Thu, 22 Oct 2020 12:45:17 -0600
Subject: [PATCH 31/40] Check context.Canceled and fix s3 input config (#22036)

---
 .../_meta/config/filebeat.inputs.reference.xpack.yml.tmpl | 4 ++--
 x-pack/filebeat/filebeat.reference.yml | 4 ++--
 x-pack/filebeat/input/s3/collector.go | 2 ++
 x-pack/filebeat/input/s3/input.go | 8 +++++++-
 4 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl b/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl
index 16964b2c84e..f083b4c814b 100644
--- a/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl
+++ b/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl
@@ -67,8 +67,8 @@
   #session_token: '${AWS_SESSION_TOKEN:""}'
   #credential_profile_name: test-s3-input
 
-  # Queue urls (required) to receive queue messages from
-  #queue_urls: ["https://sqs.us-east-1.amazonaws.com/1234/test-s3-logs-queue"]
+  # Queue URL (required) to receive queue messages from
+  #queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-s3-logs-queue"
 
   # The duration (in seconds) that the received messages are hidden from subsequent
   # retrieve requests after being retrieved by a ReceiveMessage request.
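For reference, the renamed option is singular because each s3 input instance reads from a single SQS queue. A minimal enabled configuration built from these reference options might look like the following sketch; the queue URL and account ID are placeholders, and fips_enabled/max_number_of_messages are the options introduced in the preceding patch:

    filebeat.inputs:
    - type: s3
      # Placeholder SQS queue URL; one queue per input instance.
      queue_url: "https://sqs.us-east-1.amazonaws.com/123456789012/test-s3-logs-queue"
      # Optional: use the s3-fips service endpoint instead of s3.
      fips_enabled: true
      # Optional: SQS messages fetched per ReceiveMessage request; valid values are 1 to 10.
      max_number_of_messages: 10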
diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml
index f10a46aa20e..80bfacbf2c3 100644
--- a/x-pack/filebeat/filebeat.reference.yml
+++ b/x-pack/filebeat/filebeat.reference.yml
@@ -2410,8 +2410,8 @@ filebeat.inputs:
   #session_token: '${AWS_SESSION_TOKEN:""}'
   #credential_profile_name: test-s3-input
 
-  # Queue urls (required) to receive queue messages from
-  #queue_urls: ["https://sqs.us-east-1.amazonaws.com/1234/test-s3-logs-queue"]
+  # Queue URL (required) to receive queue messages from
+  #queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-s3-logs-queue"
 
   # The duration (in seconds) that the received messages are hidden from subsequent
   # retrieve requests after being retrieved by a ReceiveMessage request.
diff --git a/x-pack/filebeat/input/s3/collector.go b/x-pack/filebeat/input/s3/collector.go
index c3d3114c723..9596b5ab23f 100644
--- a/x-pack/filebeat/input/s3/collector.go
+++ b/x-pack/filebeat/input/s3/collector.go
@@ -153,8 +153,10 @@ func (c *s3Collector) processorKeepAlive(svcSQS sqsiface.ClientAPI, message sqs.
 	for {
 		select {
 		case <-c.cancellation.Done():
+			c.logger.Debug("processorKeepAlive stopped: input cancelled")
 			return nil
 		case err := <-errC:
+			c.logger.Debugf("processorKeepAlive received: %v", err)
 			if err != nil {
 				if err == context.DeadlineExceeded {
 					c.logger.Info("Context deadline exceeded, updating visibility timeout")
diff --git a/x-pack/filebeat/input/s3/input.go b/x-pack/filebeat/input/s3/input.go
index 36f160d759e..a3f19f66327 100644
--- a/x-pack/filebeat/input/s3/input.go
+++ b/x-pack/filebeat/input/s3/input.go
@@ -5,6 +5,7 @@
 package s3
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/aws/aws-sdk-go-v2/service/s3"
@@ -67,7 +68,12 @@ func (in *s3Input) Run(ctx v2.Context, pipeline beat.Pipeline) error {
 	defer collector.publisher.Close()
 
 	collector.run()
-	return ctx.Cancelation.Err()
+
+	if ctx.Cancelation.Err() == context.Canceled {
+		return nil
+	} else {
+		return ctx.Cancelation.Err()
+	}
 }
 
 func (in *s3Input) createCollector(ctx v2.Context, pipeline beat.Pipeline) (*s3Collector, error) {

From f33bfd9b5be3b1f9287b22c575d7f9a057eebb96 Mon Sep 17 00:00:00 2001
From: Brandon Morelli
Date: Thu, 22 Oct 2020 12:46:36 -0700
Subject: [PATCH 32/40] docs: move kerberos include (#22109)

---
 auditbeat/docs/configuring-howto.asciidoc | 4 ++++
 filebeat/docs/configuring-howto.asciidoc | 4 ++++
 heartbeat/docs/configuring-howto.asciidoc | 4 ++++
 journalbeat/docs/configuring-howto.asciidoc | 4 ++++
 libbeat/docs/outputs-list.asciidoc | 4 ----
 metricbeat/docs/configuring-howto.asciidoc | 4 ++++
 packetbeat/docs/configuring-howto.asciidoc | 4 ++++
 winlogbeat/docs/configuring-howto.asciidoc | 4 ++++
 x-pack/functionbeat/docs/configuring-howto.asciidoc | 4 ++++
 9 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/auditbeat/docs/configuring-howto.asciidoc b/auditbeat/docs/configuring-howto.asciidoc
index 745c58c7997..65938efb9c7 100644
--- a/auditbeat/docs/configuring-howto.asciidoc
+++ b/auditbeat/docs/configuring-howto.asciidoc
@@ -42,6 +42,10 @@ include::./reload-configuration.asciidoc[]
 
 include::{libbeat-dir}/outputconfig.asciidoc[]
 
+ifndef::no_kerberos[]
+include::{libbeat-dir}/shared-kerberos-config.asciidoc[]
+endif::[]
+
 include::{libbeat-dir}/shared-ssl-config.asciidoc[]
 
 include::{libbeat-dir}/shared-ilm.asciidoc[]
diff --git a/filebeat/docs/configuring-howto.asciidoc b/filebeat/docs/configuring-howto.asciidoc
index ec70fe23942..f09902a0d26 100644
--- a/filebeat/docs/configuring-howto.asciidoc
+++ b/filebeat/docs/configuring-howto.asciidoc
@@ -44,6 +44,10 @@
include::./reload-configuration.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::../../libbeat/docs/shared-ilm.asciidoc[] diff --git a/heartbeat/docs/configuring-howto.asciidoc b/heartbeat/docs/configuring-howto.asciidoc index f562b8a42c1..fa312e5d4ac 100644 --- a/heartbeat/docs/configuring-howto.asciidoc +++ b/heartbeat/docs/configuring-howto.asciidoc @@ -38,6 +38,10 @@ include::{libbeat-dir}/shared-path-config.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/journalbeat/docs/configuring-howto.asciidoc b/journalbeat/docs/configuring-howto.asciidoc index 93083ac4ccc..246880468e3 100644 --- a/journalbeat/docs/configuring-howto.asciidoc +++ b/journalbeat/docs/configuring-howto.asciidoc @@ -34,6 +34,10 @@ include::{libbeat-dir}/shared-path-config.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/libbeat/docs/outputs-list.asciidoc b/libbeat/docs/outputs-list.asciidoc index bd3b2878aa6..4181c10f64f 100644 --- a/libbeat/docs/outputs-list.asciidoc +++ b/libbeat/docs/outputs-list.asciidoc @@ -83,9 +83,5 @@ ifdef::requires_xpack[] endif::[] include::{libbeat-outputs-dir}/codec/docs/codec.asciidoc[] endif::[] -ifndef::no_kerberos[] -include::{libbeat-dir}/shared-kerberos-config.asciidoc[] -endif::[] - //# end::outputs-include[] diff --git a/metricbeat/docs/configuring-howto.asciidoc b/metricbeat/docs/configuring-howto.asciidoc index 60f8928df53..dcacba01f79 100644 --- a/metricbeat/docs/configuring-howto.asciidoc +++ b/metricbeat/docs/configuring-howto.asciidoc @@ -40,6 +40,10 @@ include::{docdir}/../docs/reload-configuration.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/packetbeat/docs/configuring-howto.asciidoc b/packetbeat/docs/configuring-howto.asciidoc index cc9e3c9a926..8d27edbafd7 100644 --- a/packetbeat/docs/configuring-howto.asciidoc +++ b/packetbeat/docs/configuring-howto.asciidoc @@ -38,6 +38,10 @@ include::{libbeat-dir}/shared-path-config.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/winlogbeat/docs/configuring-howto.asciidoc b/winlogbeat/docs/configuring-howto.asciidoc index 5c1c6086ace..5d9d4758cf8 100644 --- a/winlogbeat/docs/configuring-howto.asciidoc +++ b/winlogbeat/docs/configuring-howto.asciidoc @@ -35,6 +35,10 @@ include::{libbeat-dir}/shared-path-config.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/x-pack/functionbeat/docs/configuring-howto.asciidoc 
b/x-pack/functionbeat/docs/configuring-howto.asciidoc index 192cb79fea3..3d72f9b5a55 100644 --- a/x-pack/functionbeat/docs/configuring-howto.asciidoc +++ b/x-pack/functionbeat/docs/configuring-howto.asciidoc @@ -35,6 +35,10 @@ include::./general-options.asciidoc[] [role="xpack"] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + [role="xpack"] include::{libbeat-dir}/shared-ssl-config.asciidoc[] From 2e7b90217e54016d9613ae0a1f58cba8a82cba00 Mon Sep 17 00:00:00 2001 From: Fae Charlton Date: Thu, 22 Oct 2020 15:57:56 -0400 Subject: [PATCH 33/40] [libbeat] Add more disk queue unit tests and fix a size-check bug (#22107) --- .../publisher/queue/diskqueue/core_loop.go | 24 +- .../queue/diskqueue/core_loop_test.go | 623 ++++++++++++++++-- libbeat/publisher/queue/diskqueue/queue.go | 5 + 3 files changed, 594 insertions(+), 58 deletions(-) diff --git a/libbeat/publisher/queue/diskqueue/core_loop.go b/libbeat/publisher/queue/diskqueue/core_loop.go index 77f4aadb47f..ac6e22c52d8 100644 --- a/libbeat/publisher/queue/diskqueue/core_loop.go +++ b/libbeat/publisher/queue/diskqueue/core_loop.go @@ -93,10 +93,10 @@ func (dq *diskQueue) handleProducerWriteRequest(request producerWriteRequest) { // than an entire segment all by itself (as long as it isn't, it is // guaranteed to eventually enter the queue assuming no disk errors). frameSize := request.frame.sizeOnDisk() - if dq.settings.MaxSegmentSize < frameSize { + if dq.settings.maxSegmentOffset() < segmentOffset(frameSize) { dq.logger.Warnf( - "Rejecting event with size %v because the maximum segment size is %v", - frameSize, dq.settings.MaxSegmentSize) + "Rejecting event with size %v because the segment buffer limit is %v", + frameSize, dq.settings.maxSegmentOffset()) request.responseChan <- false return } @@ -326,13 +326,19 @@ func (dq *diskQueue) maybeWritePending() { // Nothing to do right now return } + // Remove everything from pendingFrames and forward it to the writer loop. frames := dq.pendingFrames dq.pendingFrames = nil + dq.writerLoop.requestChan <- writerLoopRequest{frames: frames} - dq.writerLoop.requestChan <- writerLoopRequest{ - frames: frames, + // Compute the size of the request so we know how full the queue is going + // to be. + totalSize := uint64(0) + for _, sf := range frames { + totalSize += sf.frame.sizeOnDisk() } + dq.writeRequestSize = totalSize dq.writing = true } @@ -471,8 +477,12 @@ func (dq *diskQueue) canAcceptFrameOfSize(frameSize uint64) bool { // left in the queue after accounting for the existing segments and the // pending writes that were already accepted. pendingBytes := uint64(0) - for _, request := range dq.pendingFrames { - pendingBytes += request.frame.sizeOnDisk() + for _, sf := range dq.pendingFrames { + pendingBytes += sf.frame.sizeOnDisk() + } + // If a writing request is outstanding, include it in the size total. 
+	if dq.writing {
+		pendingBytes += dq.writeRequestSize
+	}
 	currentSize := pendingBytes + dq.segments.sizeOnDisk()
 
diff --git a/libbeat/publisher/queue/diskqueue/core_loop_test.go b/libbeat/publisher/queue/diskqueue/core_loop_test.go
index 309a145968d..1eb9ff54a15 100644
--- a/libbeat/publisher/queue/diskqueue/core_loop_test.go
+++ b/libbeat/publisher/queue/diskqueue/core_loop_test.go
@@ -24,76 +24,267 @@ import (
 	"github.com/elastic/beats/v7/libbeat/logp"
 )
 
-func TestProducerWriteRequest(t *testing.T) {
-	dq := &diskQueue{settings: DefaultSettings()}
-	frame := &writeFrame{
-		serialized: make([]byte, 100),
-	}
-	request := producerWriteRequest{
-		frame:        frame,
-		shouldBlock:  true,
-		responseChan: make(chan bool, 1),
-	}
-	dq.handleProducerWriteRequest(request)
-
-	// The request inserts 100 bytes into an empty queue, so it should succeed.
-	// We expect:
-	// - the response channel should contain the value true
-	// - the frame should be added to pendingFrames and assigned to
-	//   segment 0.
-	success, ok := <-request.responseChan
-	if !ok {
-		t.Error("Expected a response from the producer write request.")
-	}
-	if !success {
-		t.Error("Expected write request to succeed")
-	}
+func TestHandleProducerWriteRequest(t *testing.T) {
+	// handleProducerWriteRequest should:
+	// - Immediately reject any frame larger than settings.MaxSegmentSize.
+	// - If dq.blockedProducers is nonempty (indicating that other frames are
+	//   already waiting for empty space in the queue), or the queue doesn't
+	//   have room for the new frame (see canAcceptFrameOfSize), then it is
+	//   appended to blockedProducers if request.shouldBlock is true, and
+	//   otherwise is rejected immediately.
+	// - Otherwise, the request is assigned a target segment and appended
+	//   to pendingFrames.
+	//   * If the frame fits in the current writing segment, it is assigned
+	//     to that segment. Otherwise, it is assigned to segments.nextID
+	//     and segments.nextID is incremented (see enqueueWriteFrame).
 
-	if len(dq.pendingFrames) != 1 {
-		t.Error("Expected 1 pending frame after a write request.")
-	}
-	if dq.pendingFrames[0].frame != frame {
-		t.Error("Expected pendingFrames to contain the new frame.")
-	}
-	if dq.pendingFrames[0].segment.id != 0 {
-		t.Error("Expected new frame to be assigned to segment 0.")
-	}
+
+	// For this test setup, the queue is initialized with a max segment
+	// offset of 1000 and a max total size of 10000.
+	testCases := map[string]struct {
+		// The segment structure to start with before calling
+		// handleProducerWriteRequest
+		segments diskQueueSegments
+
+		// Whether the blockedProducers list should be nonempty in the
+		// initial queue state.
+		blockedProducers bool
+
+		// The size of the frame to send in the producer write request
+		frameSize int
+
+		// The value to set shouldBlock to in the producer write request
+		shouldBlock bool
+
+		// The result we expect on the request's response channel, or
+		// nil if there should be none.
+		expectedResult *bool
+
+		// The segment the frame should be assigned to in pendingFrames.
+		// This is ignored unless expectedResult is &true.
+		expectedSegment segmentID
+	}{
+		"accept single frame when empty": {
+			segments:        diskQueueSegments{nextID: 5},
+			frameSize:       1000,
+			shouldBlock:     false,
+			expectedResult:  boolRef(true),
+			expectedSegment: 5,
+		},
+		"reject immediately when frame is larger than segment limit": {
+			// max segment buffer size for the test wrapper is 1000.
+ frameSize: 1001, + shouldBlock: true, + expectedResult: boolRef(false), + }, + "accept with frame in new segment if current segment is full": { + segments: diskQueueSegments{ + writing: []*queueSegment{{}}, + nextWriteOffset: 600, + nextID: 1, + }, + frameSize: 500, + shouldBlock: false, + expectedResult: boolRef(true), + expectedSegment: 1, + }, + "reject when full and shouldBlock=false": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {endOffset: 9600}, + }, + }, + frameSize: 500, + shouldBlock: false, + expectedResult: boolRef(false), + }, + "block when full and shouldBlock=true": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {endOffset: 9600}, + }, + }, + frameSize: 500, + shouldBlock: true, + expectedResult: nil, + }, + "reject when blockedProducers is nonempty and shouldBlock=false": { + blockedProducers: true, + frameSize: 500, + shouldBlock: false, + expectedResult: boolRef(false), + }, + "block when blockedProducers is nonempty and shouldBlock=true": { + blockedProducers: true, + frameSize: 500, + shouldBlock: true, + expectedResult: nil, + }, } - if dq.pendingFrames[0].segment.id != 0 { - t.Error("Expected new frame to be assigned to segment 0.") + + settings := DefaultSettings() + settings.MaxSegmentSize = 1000 + segmentHeaderSize + settings.MaxBufferSize = 10000 + for description, test := range testCases { + dq := &diskQueue{ + logger: logp.L(), + settings: settings, + segments: test.segments, + } + if test.blockedProducers { + // Set an empty placeholder write request + dq.blockedProducers = []producerWriteRequest{{}} + } + initialBlockedProducerCount := len(dq.blockedProducers) + + // Construct a frame of the requested size. We subtract the + // metadata size from the buffer length, so test.frameSize + // corresponds to the "real" on-disk size of the frame. + request := producerWriteRequest{ + frame: makeWriteFrameWithSize(test.frameSize), + shouldBlock: test.shouldBlock, + responseChan: make(chan bool, 1), + } + + dq.handleProducerWriteRequest(request) + + var result *bool + select { + case r := <-request.responseChan: + result = &r + default: + // No response, result can stay nil. + } + + // Check that the result itself is correct. + if result != nil && test.expectedResult != nil { + if *result != *test.expectedResult { + t.Errorf("%s: expected response %v, got %v", + description, *test.expectedResult, *result) + } + } else if result == nil && test.expectedResult != nil { + t.Errorf("%s: expected response %v, got none", + description, *test.expectedResult) + } else if result != nil && test.expectedResult == nil { + t.Errorf("%s: expected no response, got %v", + description, *result) + } + // Check whether the request was added to blockedProducers. + if test.expectedResult != nil && + len(dq.blockedProducers) > initialBlockedProducerCount { + // Requests with responses shouldn't be added to + // blockedProducers. + t.Errorf("%s: request shouldn't be added to blockedProducers", + description) + } else if test.expectedResult == nil && + len(dq.blockedProducers) <= initialBlockedProducerCount { + // Requests without responses should be added to + // blockedProducers. + t.Errorf("%s: request should be added to blockedProducers", + description) + } + // Check whether the frame was added to pendingFrames. 
+ var lastPendingFrame *segmentedFrame + if len(dq.pendingFrames) != 0 { + lastPendingFrame = &dq.pendingFrames[len(dq.pendingFrames)-1] + } + if test.expectedResult != nil && *test.expectedResult { + // If the result is success, the frame should now be + // enqueued. + if lastPendingFrame == nil || + lastPendingFrame.frame != request.frame { + t.Errorf("%s: frame should be added to pendingFrames", + description) + } else if lastPendingFrame.segment.id != test.expectedSegment { + t.Errorf("%s: expected frame to be in segment %v, got %v", + description, test.expectedSegment, + lastPendingFrame.segment.id) + } + // Check that segments.nextID is one more than the segment that + // was just assigned. + if lastPendingFrame != nil && + dq.segments.nextID != test.expectedSegment+1 { + t.Errorf("%s: expected segments.nextID to be %v, got %v", + description, test.expectedSegment+1, dq.segments.nextID) + } + } } } func TestHandleWriterLoopResponse(t *testing.T) { - // Initialize the queue with two writing segments only. + // handleWriterLoopResponse should: + // - Add the values in the bytesWritten array, in order, to the endOffset + // of the segments in segments.writing (these represent the amount + // written to each segment as a result of the preceding writer loop + // request). + // - If bytesWritten covers more than one writing segment, then move + // all except the last one from segments.writing to segments.reading. + // These invariants are relatively simple so this test is "by hand" + // rather than using a structured list of sub-cases. + dq := &diskQueue{ settings: DefaultSettings(), segments: diskQueueSegments{ writing: []*queueSegment{ - {id: 1}, + {id: 1, endOffset: 100}, {id: 2}, + {id: 3}, + {id: 4}, }, }, } - // This response says that the writer loop wrote 200 bytes to the first - // segment and 100 bytes to the second. 
+ + // Write to one segment (no segments should be moved to reading list) dq.handleWriterLoopResponse(writerLoopResponse{ - bytesWritten: []int64{200, 100}, + bytesWritten: []int64{100}, }) - - // After the response is handled, we expect: - // - Each segment's endOffset should be incremented by the bytes written - // - Segment 1 should be moved to the reading list (because all but the - // last segment in a writer loop response has been closed) - // - Segment 2 should remain in the writing list - if len(dq.segments.reading) != 1 || dq.segments.reading[0].id != 1 { - t.Error("Expected segment 1 to move to the reading list") + if len(dq.segments.writing) != 4 || len(dq.segments.reading) != 0 { + t.Fatalf("expected 4 writing and 0 reading segments, got %v writing "+ + "and %v reading", len(dq.segments.writing), len(dq.segments.reading)) } - if len(dq.segments.writing) != 1 || dq.segments.writing[0].id != 2 { - t.Error("Expected segment 2 to remain in the writing list") + if dq.segments.writing[0].endOffset != 200 { + t.Errorf("expected first writing segment to be size 200, got %v", + dq.segments.writing[0].endOffset) + } + + // Write to two segments (the first one should be moved to reading list) + dq.handleWriterLoopResponse(writerLoopResponse{ + bytesWritten: []int64{100, 100}, + }) + if len(dq.segments.writing) != 3 || len(dq.segments.reading) != 1 { + t.Fatalf("expected 3 writing and 1 reading segments, got %v writing "+ + "and %v reading", len(dq.segments.writing), len(dq.segments.reading)) } - if dq.segments.reading[0].endOffset != 200 { - t.Errorf("Expected segment 1 endOffset 200, got %d", + if dq.segments.reading[0].endOffset != 300 { + t.Errorf("expected first reading segment to be size 300, got %v", dq.segments.reading[0].endOffset) } if dq.segments.writing[0].endOffset != 100 { - t.Errorf("Expected segment 2 endOffset 100, got %d", + t.Errorf("expected first writing segment to be size 100, got %v", + dq.segments.writing[0].endOffset) + } + + // Write to three segments (the first two should be moved to reading list) + dq.handleWriterLoopResponse(writerLoopResponse{ + bytesWritten: []int64{100, 100, 500}, + }) + if len(dq.segments.writing) != 1 || len(dq.segments.reading) != 3 { + t.Fatalf("expected 1 writing and 3 reading segments, got %v writing "+ + "and %v reading", len(dq.segments.writing), len(dq.segments.reading)) + } + if dq.segments.reading[0].endOffset != 300 { + t.Errorf("expected first reading segment to be size 300, got %v", + dq.segments.reading[0].endOffset) + } + if dq.segments.reading[1].endOffset != 200 { + t.Errorf("expected second reading segment to be size 200, got %v", + dq.segments.reading[1].endOffset) + } + if dq.segments.reading[2].endOffset != 100 { + t.Errorf("expected third reading segment to be size 100, got %v", + dq.segments.reading[2].endOffset) + } + if dq.segments.writing[0].endOffset != 500 { + t.Errorf("expected first writing segment to be size 500, got %v", dq.segments.writing[0].endOffset) } } @@ -111,7 +302,8 @@ func TestHandleReaderLoopResponse(t *testing.T) { // mark the remaining data as processed) testCases := map[string]struct { - // The segment structure to start with before calling maybeReadPending + // The segment structure to start with before calling + // handleReaderLoopResponse. 
segments diskQueueSegments response readerLoopResponse @@ -273,9 +465,10 @@ func TestHandleReaderLoopResponse(t *testing.T) { func TestMaybeReadPending(t *testing.T) { // maybeReadPending should: + // - If diskQueue.reading is true, do nothing and return immediately. // - If any unread data is available in a reading or writing segment, // send a readerLoopRequest for the full amount available in the - // first such segment. + // first such segment, and set diskQueue.reading to true. // - When creating a readerLoopRequest that includes the beginning of // a segment (startOffset == 0), set that segment's firstFrameID // to segments.nextReadFrameID (so ACKs based on frame ID can be linked @@ -287,6 +480,8 @@ func TestMaybeReadPending(t *testing.T) { testCases := map[string]struct { // The segment structure to start with before calling maybeReadPending segments diskQueueSegments + // The value of the diskQueue.reading flag before calling maybeReadPending + reading bool // The request we expect to see on the reader loop's request channel, // or nil if there should be none. expectedRequest *readerLoopRequest @@ -308,6 +503,15 @@ func TestMaybeReadPending(t *testing.T) { endOffset: 1000, }, }, + "do nothing if reading flag is set": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + }, + reading: true, + expectedRequest: nil, + }, "read the end of a segment": { segments: diskQueueSegments{ reading: []*queueSegment{ @@ -402,6 +606,7 @@ func TestMaybeReadPending(t *testing.T) { readerLoop: &readerLoop{ requestChan: make(chan readerLoopRequest, 1), }, + reading: test.reading, } firstFrameID := test.segments.nextReadFrameID dq.maybeReadPending() @@ -421,6 +626,10 @@ func TestMaybeReadPending(t *testing.T) { t.Errorf( "%s: maybeReadPending should update firstFrameID", description) } + if !dq.reading { + t.Errorf( + "%s: maybeReadPending should set the reading flag", description) + } default: if test.expectedRequest != nil { t.Errorf("%s: expected read request %v, got none", @@ -446,10 +655,322 @@ func TestMaybeReadPending(t *testing.T) { } } +func TestMaybeWritePending(t *testing.T) { + // maybeWritePending should: + // - If diskQueue.writing is true, do nothing and return immediately. + // - Otherwise, if diskQueue.pendingFrames is nonempty: + // * send its contents as a writer loop request + // * set diskQueue.writeRequestSize to the total size of the + // request's frames + // * reset diskQueue.pendingFrames to nil + // * set diskQueue.writing to true. + dq := &diskQueue{ + settings: DefaultSettings(), + writerLoop: &writerLoop{ + requestChan: make(chan writerLoopRequest, 1), + }, + } + + // First call: pendingFrames is empty, should do nothing. + dq.maybeWritePending() + select { + case request := <-dq.writerLoop.requestChan: + t.Errorf("expected no request on empty pendingFrames, got %v", request) + default: + if dq.writing { + t.Errorf( + "maybeWritePending shouldn't set writing flag without a request") + } + } + + // Set up some frame data for the remaining calls. + pendingFrames := []segmentedFrame{ + {frame: makeWriteFrameWithSize(100)}, + {frame: makeWriteFrameWithSize(200)}} + // The size on disk should be the summed buffer lengths plus + // frameMetadataSize times the number of frames + expectedSize := uint64(300) + + // Second call: writing is true, should do nothing. 
+ dq.pendingFrames = pendingFrames + dq.writing = true + dq.maybeWritePending() + select { + case request := <-dq.writerLoop.requestChan: + t.Errorf("expected no request with writing flag set, got %v", request) + default: + } + + // Third call: writing is false, should send a request with pendingFrames. + dq.writing = false + dq.maybeWritePending() + select { + case request := <-dq.writerLoop.requestChan: + // We are extra strict, because we can afford to be: the request should + // contain not just the same elements, but the exact same array (slice) + // as the previous value of pendingFrames. + if len(request.frames) != len(pendingFrames) || + &request.frames[0] != &pendingFrames[0] { + t.Errorf( + "expected request containing pendingFrames, got a different array") + } + if dq.writeRequestSize != expectedSize { + t.Errorf("expected writeRequestSize to equal %v, got %v", + expectedSize, dq.writeRequestSize) + } + if len(dq.pendingFrames) != 0 { + t.Errorf("pendingFrames should be reset after a write request") + } + if !dq.writing { + t.Errorf("the writing flag should be set after a write request") + } + default: + } +} + +func TestMaybeUnblockProducers(t *testing.T) { + // maybeUnblockProducers should: + // - As long as diskQueue.blockedProducers is nonempty and the queue has + // capacity to add its first element (see TestCanAcceptFrameOfSize): + // * Add the request's frame to diskQueue.pendingFrames (see + // enqueueWriteFrame) + // * Report success (true) to the producer's response channel + // * Remove the request from blockedProducers + // When complete, either blockedProducers should be empty or its first + // element should be too big to add to the queue. + + settings := DefaultSettings() + settings.MaxBufferSize = 1000 + responseChans := []chan bool{ + make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)} + dq := &diskQueue{ + settings: settings, + segments: diskQueueSegments{ + writing: []*queueSegment{segmentWithSize(100)}, + }, + blockedProducers: []producerWriteRequest{ + { + frame: makeWriteFrameWithSize(200), + responseChan: responseChans[0], + }, + { + frame: makeWriteFrameWithSize(200), + responseChan: responseChans[1], + }, + { + frame: makeWriteFrameWithSize(501), + responseChan: responseChans[2], + }, + }, + } + + // First call: we expect two producers to be unblocked, because the third + // one would push us one byte above the 1000 byte limit. + dq.maybeUnblockProducers() + if len(dq.pendingFrames) != 2 || len(dq.blockedProducers) != 1 { + t.Fatalf("Expected 2 pending frames and 1 blocked producer, got %v and %v", + len(dq.pendingFrames), len(dq.blockedProducers)) + } + for i := 0; i < 3; i++ { + select { + case response := <-responseChans[i]: + if i < 2 && !response { + t.Errorf("Expected success response for producer %v, got failure", i) + } else if i == 2 { + t.Fatalf("Expected no response for producer 2, got %v", response) + } + default: + if i < 2 { + t.Errorf("Expected success response for producer %v, got none", i) + } + } + } + + dq.blockedProducers[0].frame = makeWriteFrameWithSize(500) + // Second call: with the blocked request one byte smaller, it should fit + // into the queue, and be added with the other pending frames. 
+ dq.maybeUnblockProducers() + if len(dq.pendingFrames) != 3 || len(dq.blockedProducers) != 0 { + t.Fatalf("Expected 3 pending frames and 0 blocked producers, got %v and %v", + len(dq.pendingFrames), len(dq.blockedProducers)) + } + for i := 0; i < 3; i++ { + // This time the first two response channels should get nothing and the + // third should get success. + select { + case response := <-responseChans[i]: + if i < 2 { + t.Errorf("Expected no response for producer %v, got %v", i, response) + } else if !response { + t.Errorf("Expected success response for producer 2, got failure") + } + default: + if i == 2 { + t.Errorf("Expected success response for producer 2, got none") + } + } + } +} + +func TestCanAcceptFrameOfSize(t *testing.T) { + // canAcceptFrameOfSize decides whether the queue has enough free capacity + // to accept an incoming frame of the given size. It should: + // - If the length of pendingFrames is >= settings.WriteAheadLimit, + // return false. + // - If the queue size is unbounded (MaxBufferSize == 0), return true. + // - Otherwise, return true iff the total size of the queue plus the new + // frame is <= settings.MaxBufferSize. + // The size of the queue is calculated as the summed size of: + // * All segments listed in diskQueue.segments (writing, reading, acking, + // acked) + // * All frames in diskQueue.pendingFrames (which have been accepted but + // not yet written) + // * If a write request is outstanding (diskQueue.writing == true), + // diskQueue.writeRequestSize, which is the size of the data that is + // being written by writerLoop but hasn't yet been completed. + // All test cases are run with WriteAheadLimit = 2. + + testCases := map[string]struct { + // The value of settings.MaxBufferSize in the test queue. + maxBufferSize uint64 + // The value of the segments field in the test queue. + segments diskQueueSegments + // The value of pendingFrames in the test queue. + pendingFrames []segmentedFrame + // The value of writeRequestSize (the size of the most recent write + // request) in the test queue. + writeRequestSize uint64 + // The value of the writing flag in the test queue (writeRequestSize is + // included in the queue size calculation only if there is an active + // writing request). + writing bool + + // If expectedOutcomes[v] = b then canAcceptFrameOfSize(v) should return b. + expectedOutcomes map[uint64]bool + }{ + "always reject when at the write ahead limit": { + maxBufferSize: 1000, + pendingFrames: []segmentedFrame{ + {frame: makeWriteFrameWithSize(10)}, + {frame: makeWriteFrameWithSize(10)}, + }, + expectedOutcomes: map[uint64]bool{10: false}, + }, + "always accept when queue size is unbounded": { + maxBufferSize: 0, + expectedOutcomes: map[uint64]bool{ + 1: true, 1000: true, 1000000: true, 1000000000: true, + }, + }, + // The remaining cases are all below the write ahead limit and have + // bounded buffer size, we are just testing that the various + // source values are all accounted for. 
+ "pendingFrames counts against buffer capacity": { + maxBufferSize: 1000, + pendingFrames: []segmentedFrame{ + {frame: makeWriteFrameWithSize(500)}, + }, + // There should be exactly 500 bytes capacity left + expectedOutcomes: map[uint64]bool{ + 500: true, 501: false, + }, + }, + "diskQueue.segments counts against buffer capacity": { + maxBufferSize: 1000, + segments: diskQueueSegments{ + writing: []*queueSegment{segmentWithSize(100)}, + reading: []*queueSegment{segmentWithSize(100)}, + acking: []*queueSegment{segmentWithSize(100)}, + acked: []*queueSegment{segmentWithSize(100)}, + }, + // Four segments of size 100, should be exactly 600 bytes left + expectedOutcomes: map[uint64]bool{ + 600: true, 601: false, + }, + }, + "writeRequestSize counts against buffer capacity when writing=true": { + maxBufferSize: 1000, + writeRequestSize: 600, + writing: true, + expectedOutcomes: map[uint64]bool{ + 400: true, 401: false, + }, + }, + "writeRequestSize doesn't count against buffer capacity when writing=false": { + maxBufferSize: 1000, + writeRequestSize: 600, + writing: false, + expectedOutcomes: map[uint64]bool{ + 1000: true, 1001: false, + }, + }, + "buffer capacity includes the sum of all sources": { + // include all of them together. + maxBufferSize: 1000, + segments: diskQueueSegments{ + writing: []*queueSegment{segmentWithSize(100)}, + reading: []*queueSegment{segmentWithSize(100)}, + acking: []*queueSegment{segmentWithSize(100)}, + acked: []*queueSegment{segmentWithSize(100)}, + }, + pendingFrames: []segmentedFrame{ + {frame: makeWriteFrameWithSize(100)}, + }, + writeRequestSize: 200, + writing: true, + expectedOutcomes: map[uint64]bool{ + 300: true, 301: false, + }, + }, + } + + for description, test := range testCases { + settings := DefaultSettings() + settings.WriteAheadLimit = 2 + settings.MaxBufferSize = test.maxBufferSize + dq := &diskQueue{ + settings: settings, + segments: test.segments, + pendingFrames: test.pendingFrames, + writeRequestSize: test.writeRequestSize, + writing: test.writing, + } + for size, expected := range test.expectedOutcomes { + result := dq.canAcceptFrameOfSize(size) + if result != expected { + t.Errorf("%v: expected canAcceptFrameOfSize(%v) = %v, got %v", + description, size, expected, result) + } + } + } +} + +func boolRef(b bool) *bool { + return &b +} + func segmentIDRef(id segmentID) *segmentID { return &id } +// Convenience helper that creates a frame that will have the given size on +// disk after accounting for header / footer size. +func makeWriteFrameWithSize(size int) *writeFrame { + if size <= frameMetadataSize { + // Frames must have a nonempty data region. + return nil + } + return &writeFrame{serialized: make([]byte, size-frameMetadataSize)} +} + +func segmentWithSize(size int) *queueSegment { + if size < segmentHeaderSize { + // Can't have a segment smaller than the segment header + return nil + } + return &queueSegment{endOffset: segmentOffset(size - segmentHeaderSize)} +} + func equalReaderLoopRequests( r0 readerLoopRequest, r1 readerLoopRequest, ) bool { diff --git a/libbeat/publisher/queue/diskqueue/queue.go b/libbeat/publisher/queue/diskqueue/queue.go index 5f756996e5f..1819ced21d5 100644 --- a/libbeat/publisher/queue/diskqueue/queue.go +++ b/libbeat/publisher/queue/diskqueue/queue.go @@ -55,6 +55,11 @@ type diskQueue struct { // otherwise. writing bool + // If writing is true, then writeRequestSize equals the number of bytes it + // contained. 
Used to calculate how much free capacity the queue has left + // after all scheduled writes have been completed (see canAcceptFrameOfSize). + writeRequestSize uint64 + // reading is true if the reader loop is processing a request, false // otherwise. reading bool From 3842bee898759378781ec37e0d0697637fcff60b Mon Sep 17 00:00:00 2001 From: Lee Hinman <57081003+leehinman@users.noreply.github.com> Date: Thu, 22 Oct 2020 16:56:37 -0500 Subject: [PATCH 34/40] Incorporate librpm fix feedback (#22098) - re-order imports - fix capitalization in error string --- x-pack/auditbeat/module/system/package/rpm_linux.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/x-pack/auditbeat/module/system/package/rpm_linux.go b/x-pack/auditbeat/module/system/package/rpm_linux.go index 6e5df7e0c6e..399c121f878 100644 --- a/x-pack/auditbeat/module/system/package/rpm_linux.go +++ b/x-pack/auditbeat/module/system/package/rpm_linux.go @@ -7,6 +7,7 @@ package pkg import ( + "debug/elf" "errors" "fmt" "runtime" @@ -14,8 +15,6 @@ import ( "time" "unsafe" - "debug/elf" - "github.com/coreos/pkg/dlopen" ) @@ -257,7 +256,7 @@ func openLibrpm() (*librpm, error) { librpm.handle, err = dlopen.GetHandle(librpmNames) if err != nil { - return nil, fmt.Errorf("Couldn't open %v", librpmNames) + return nil, fmt.Errorf("couldn't open %v: %v", librpmNames, err) } librpm.rpmtsCreate, err = librpm.handle.GetSymbolPointer("rpmtsCreate") From 14326dc5f40f67868db1c7ba1a76200f5f2791af Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Thu, 22 Oct 2020 20:34:14 -0700 Subject: [PATCH 35/40] Edit 6.8.13 release notes (#22120) --- CHANGELOG.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 349eb49edb3..df4e85892e3 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -2607,7 +2607,7 @@ https://github.com/elastic/beats/compare/v6.8.12...v6.8.13[View commits] *Filebeat* -- Add container image in Kubernetes metadata {pull}13356[13356] {issue}12688[12688] +- Add container image in Kubernetes metadata. {pull}13356[13356] {issue}12688[12688] [[release-notes-6.8.12]] === Beats version 6.8.12 From e74e886884ffbc1ca5c59ef636bfaed41792cbe7 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Thu, 22 Oct 2020 20:36:05 -0700 Subject: [PATCH 36/40] Edit 7.9.3 changelog (#22117) --- CHANGELOG.asciidoc | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index df4e85892e3..61353d3afdb 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -11,20 +11,21 @@ https://github.com/elastic/beats/compare/v7.9.2...v7.9.3[View commits] *Affecting all Beats* -- The `o365input` and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21258[21258] +- The `o365audit` input and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21258[21258] *Auditbeat* -- system/socket: Fixed a crash due to concurrent map read and write. {issue}21192[21192] {pull}21690[21690] +- system/socket: Fix a crash due to concurrent map read and write. {issue}21192[21192] {pull}21690[21690] *Filebeat* - Add field limit check for AWS Cloudtrail flattened fields. {pull}21388[21388] {issue}21382[21382] + *Metricbeat* -- Fix remote_write flaky test. 
{pull}21173[21173] -- Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] -- [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] +- Fix `remote_write` flaky test. {pull}21173[21173] +- Fix panic in Kubernetes autodiscovery caused by storing stateless keystores. {issue}21843[21843] {pull}21880[21880] +- Remove redundant dockersock volume mount to avoid problems on Kubernetes deployments that do not use docker as the container runtime. {pull}22009[22009] [[release-notes-7.9.2]] From eb695ef4312a5dffaa708f1591ebaf5b7800d9ea Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Thu, 22 Oct 2020 20:37:17 -0700 Subject: [PATCH 37/40] Add fleet settings image (#22065) --- x-pack/elastic-agent/docs/run-elastic-agent.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc b/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc index 34bb2481f7f..2314f7652f4 100644 --- a/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc +++ b/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc @@ -27,7 +27,7 @@ For self-managed installations, set the URLs for {es} and {kib}, including the http ports, then save your changes. + [role="screenshot"] -//image::images/kibana-fleet-settings.png[{fleet} settings] +image::images/kibana-fleet-settings.png[{fleet} settings] . Select **Agents**, then click **Add agent** to get an enrollment token. See <> for detailed steps. From 155dfda99aeb7a74383aa9023d4f350c4d5da668 Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Fri, 23 Oct 2020 09:12:12 +0200 Subject: [PATCH 38/40] Change x509 mappings from file. to tls.server. (#22097) --- .../module/suricata/eve/ingest/pipeline.yml | 32 ++++++------ .../eve/test/eve-alerts.log-expected.json | 52 +++++++++---------- .../eve/test/eve-small.log-expected.json | 24 ++++----- 3 files changed, 54 insertions(+), 54 deletions(-) diff --git a/x-pack/filebeat/module/suricata/eve/ingest/pipeline.yml b/x-pack/filebeat/module/suricata/eve/ingest/pipeline.yml index 01ed5accbe6..e132a8acdde 100644 --- a/x-pack/filebeat/module/suricata/eve/ingest/pipeline.yml +++ b/x-pack/filebeat/module/suricata/eve/ingest/pipeline.yml @@ -247,27 +247,27 @@ processors: ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.C - target_field: file.x509.issuer.country + target_field: tls.server.x509.issuer.country ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.CN - target_field: file.x509.issuer.common_name + target_field: tls.server.x509.issuer.common_name ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.L - target_field: file.x509.issuer.locality + target_field: tls.server.x509.issuer.locality ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.O - target_field: file.x509.issuer.organization + target_field: tls.server.x509.issuer.organization ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.OU - target_field: file.x509.issuer.organizational_unit + target_field: tls.server.x509.issuer.organizational_unit ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.ST - target_field: file.x509.issuer.state_or_province + target_field: tls.server.x509.issuer.state_or_province ignore_missing: true - gsub: field: suricata.eve.tls.subject @@ -282,34 +282,34 @@ processors: ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.C - target_field: file.x509.subject.country + target_field: tls.server.x509.subject.country ignore_missing: true - 
rename: field: suricata.eve.tls.kv_subject.CN - target_field: file.x509.subject.common_name + target_field: tls.server.x509.subject.common_name ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.L - target_field: file.x509.subject.locality + target_field: tls.server.x509.subject.locality ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.O - target_field: file.x509.subject.organization + target_field: tls.server.x509.subject.organization ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.OU - target_field: file.x509.subject.organizational_unit + target_field: tls.server.x509.subject.organizational_unit ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.ST - target_field: file.x509.subject.state_or_province + target_field: tls.server.x509.subject.state_or_province ignore_missing: true - set: - field: file.x509.serial_number + field: tls.server.x509.serial_number value: '{{suricata.eve.tls.serial}}' ignore_empty_value: true - gsub: - field: file.x509.serial_number + field: tls.server.x509.serial_number pattern: ':' replacement: '' ignore_missing: true @@ -326,11 +326,11 @@ processors: - ISO8601 if: ctx.suricata?.eve?.tls?.notbefore != null - set: - field: file.x509.not_after + field: tls.server.x509.not_after value: '{{tls.server.not_after}}' ignore_empty_value: true - set: - field: file.x509.not_before + field: tls.server.x509.not_before value: '{{tls.server.not_before}}' ignore_empty_value: true - append: diff --git a/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json b/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json index a63e2fd592a..ecccab3a10f 100644 --- a/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json +++ b/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json @@ -1633,17 +1633,6 @@ "event.type": [ "protocol" ], - "file.x509.issuer.common_name": "Google Internet Authority G2", - "file.x509.issuer.country": "US", - "file.x509.issuer.organization": "Google Inc", - "file.x509.not_after": "2024-07-16T14:52:35.000Z", - "file.x509.not_before": "2019-07-17T14:52:35.000Z", - "file.x509.serial_number": "001122334455667788", - "file.x509.subject.common_name": "*.google.com", - "file.x509.subject.country": "US", - "file.x509.subject.locality": "Mountain View", - "file.x509.subject.organization": "Google Inc", - "file.x509.subject.state_or_province": "California", "fileset.name": "eve", "input.type": "log", "log.offset": 16546, @@ -1687,6 +1676,17 @@ "tls.server.not_after": "2024-07-16T14:52:35.000Z", "tls.server.not_before": "2019-07-17T14:52:35.000Z", "tls.server.subject": "C=US, ST=California, L=Mountain View, O=Google Inc, CN=*.google.com", + "tls.server.x509.issuer.common_name": "Google Internet Authority G2", + "tls.server.x509.issuer.country": "US", + "tls.server.x509.issuer.organization": "Google Inc", + "tls.server.x509.not_after": "2024-07-16T14:52:35.000Z", + "tls.server.x509.not_before": "2019-07-17T14:52:35.000Z", + "tls.server.x509.serial_number": "001122334455667788", + "tls.server.x509.subject.common_name": "*.google.com", + "tls.server.x509.subject.country": "US", + "tls.server.x509.subject.locality": "Mountain View", + "tls.server.x509.subject.organization": "Google Inc", + "tls.server.x509.subject.state_or_province": "California", "tls.version": "1.2", "tls.version_protocol": "tls" }, @@ -1711,21 +1711,6 @@ "event.type": [ "allowed" ], - "file.x509.issuer.common_name": "Unknown", - "file.x509.issuer.country": "Unknown", - 
"file.x509.issuer.locality": "Unknown", - "file.x509.issuer.organization": "Unknown", - "file.x509.issuer.organizational_unit": "Unknown", - "file.x509.issuer.state_or_province": "Unknown", - "file.x509.not_after": "2026-06-25T17:36:29.000Z", - "file.x509.not_before": "2016-06-27T17:36:29.000Z", - "file.x509.serial_number": "72A92C51", - "file.x509.subject.common_name": "Unknown", - "file.x509.subject.country": "Unknown", - "file.x509.subject.locality": "Unknown", - "file.x509.subject.organization": "Unknown", - "file.x509.subject.organizational_unit": "Unknown", - "file.x509.subject.state_or_province": "Unknown", "fileset.name": "eve", "input.type": "log", "log.offset": 17541, @@ -1781,6 +1766,21 @@ "tls.server.not_after": "2026-06-25T17:36:29.000Z", "tls.server.not_before": "2016-06-27T17:36:29.000Z", "tls.server.subject": "C=Unknown, ST=Unknown, L=Unknown, O=Unknown, OU=Unknown, CN=Unknown", + "tls.server.x509.issuer.common_name": "Unknown", + "tls.server.x509.issuer.country": "Unknown", + "tls.server.x509.issuer.locality": "Unknown", + "tls.server.x509.issuer.organization": "Unknown", + "tls.server.x509.issuer.organizational_unit": "Unknown", + "tls.server.x509.issuer.state_or_province": "Unknown", + "tls.server.x509.not_after": "2026-06-25T17:36:29.000Z", + "tls.server.x509.not_before": "2016-06-27T17:36:29.000Z", + "tls.server.x509.serial_number": "72A92C51", + "tls.server.x509.subject.common_name": "Unknown", + "tls.server.x509.subject.country": "Unknown", + "tls.server.x509.subject.locality": "Unknown", + "tls.server.x509.subject.organization": "Unknown", + "tls.server.x509.subject.organizational_unit": "Unknown", + "tls.server.x509.subject.state_or_province": "Unknown", "tls.version": "1.2", "tls.version_protocol": "tls" } diff --git a/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json b/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json index 4851f2db826..2db09a8ee38 100644 --- a/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json +++ b/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json @@ -430,18 +430,6 @@ "event.type": [ "protocol" ], - "file.x509.issuer.common_name": "Apple IST CA 2 - G1", - "file.x509.issuer.country": "US", - "file.x509.issuer.organization": "Apple Inc.", - "file.x509.issuer.organizational_unit": "Certification Authority", - "file.x509.not_after": "2019-03-29T17:54:31.000Z", - "file.x509.not_before": "2017-02-27T17:54:31.000Z", - "file.x509.serial_number": "5C9CE1097887F807", - "file.x509.subject.common_name": "*.icloud.com", - "file.x509.subject.country": "US", - "file.x509.subject.organization": "Apple Inc.", - "file.x509.subject.organizational_unit": "management:idms.group.506364", - "file.x509.subject.state_or_province": "California", "fileset.name": "eve", "input.type": "log", "log.offset": 4683, @@ -479,6 +467,18 @@ "tls.server.not_after": "2019-03-29T17:54:31.000Z", "tls.server.not_before": "2017-02-27T17:54:31.000Z", "tls.server.subject": "CN=*.icloud.com, OU=management:idms.group.506364, O=Apple Inc., ST=California, C=US", + "tls.server.x509.issuer.common_name": "Apple IST CA 2 - G1", + "tls.server.x509.issuer.country": "US", + "tls.server.x509.issuer.organization": "Apple Inc.", + "tls.server.x509.issuer.organizational_unit": "Certification Authority", + "tls.server.x509.not_after": "2019-03-29T17:54:31.000Z", + "tls.server.x509.not_before": "2017-02-27T17:54:31.000Z", + "tls.server.x509.serial_number": "5C9CE1097887F807", + "tls.server.x509.subject.common_name": "*.icloud.com", 
+ "tls.server.x509.subject.country": "US", + "tls.server.x509.subject.organization": "Apple Inc.", + "tls.server.x509.subject.organizational_unit": "management:idms.group.506364", + "tls.server.x509.subject.state_or_province": "California", "tls.version": "1.2", "tls.version_protocol": "tls" }, From 2f7b15b7da8f41ef1534d5c5a3c1ac80d9ffbd40 Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Mon, 26 Oct 2020 10:15:05 +0100 Subject: [PATCH 39/40] Use default config when creating the input (#22126) --- x-pack/filebeat/input/httpjson/input_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/filebeat/input/httpjson/input_manager.go b/x-pack/filebeat/input/httpjson/input_manager.go index 21f5066dc05..8d7e6070786 100644 --- a/x-pack/filebeat/input/httpjson/input_manager.go +++ b/x-pack/filebeat/input/httpjson/input_manager.go @@ -36,7 +36,7 @@ func (m inputManager) Init(grp unison.Group, mode v2.Mode) error { // Create creates a cursor input manager if the config has a date cursor set up, // otherwise it creates a stateless input manager. func (m inputManager) Create(cfg *common.Config) (v2.Input, error) { - var config config + config := newDefaultConfig() if err := cfg.Unpack(&config); err != nil { return nil, err } From a56193354a5a24b003ac33243916d42f7e39274f Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 26 Oct 2020 11:22:26 +0000 Subject: [PATCH 40/40] [CI] support windows-10 (#19804) --- Jenkinsfile.yml | 2 +- auditbeat/Jenkinsfile.yml | 11 +++++++++++ filebeat/Jenkinsfile.yml | 13 +++++++++++++ heartbeat/Jenkinsfile.yml | 11 +++++++++++ metricbeat/Jenkinsfile.yml | 11 +++++++++++ packetbeat/Jenkinsfile.yml | 11 +++++++++++ winlogbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/auditbeat/Jenkinsfile.yml | 13 ++++++++++++- x-pack/elastic-agent/Jenkinsfile.yml | 11 +++++++++++ x-pack/filebeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/functionbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/metricbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/packetbeat/Jenkinsfile.yml | 13 ++++++++++++- x-pack/winlogbeat/Jenkinsfile.yml | 11 +++++++++++ 14 files changed, 148 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile.yml b/Jenkinsfile.yml index f7b21e1cbdf..cc35232d6d0 100644 --- a/Jenkinsfile.yml +++ b/Jenkinsfile.yml @@ -8,7 +8,7 @@ projects: - "libbeat" - "metricbeat" - "packetbeat" - - "winlogbeat" + ##- "winlogbeat" See https://github.com/elastic/beats/issues/22046 - "x-pack/auditbeat" - "x-pack/dockerlogbeat" - "x-pack/elastic-agent" diff --git a/auditbeat/Jenkinsfile.yml b/auditbeat/Jenkinsfile.yml index b3f20af2d37..c68b5689f48 100644 --- a/auditbeat/Jenkinsfile.yml +++ b/auditbeat/Jenkinsfile.yml @@ -69,3 +69,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test auditbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/filebeat/Jenkinsfile.yml b/filebeat/Jenkinsfile.yml index 09dbe948c72..d8ea11c24a5 100644 --- a/filebeat/Jenkinsfile.yml +++ b/filebeat/Jenkinsfile.yml @@ -57,3 +57,16 @@ stages: - "windows-2016" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. 
+      comments:
+        - "/test filebeat for windows-10"
+      labels:
+        - "windows-10"
+      branches: true ## for all the branches
+      tags: true ## for all the tags
diff --git a/heartbeat/Jenkinsfile.yml b/heartbeat/Jenkinsfile.yml
index 1d41dbe581e..032ec411892 100644
--- a/heartbeat/Jenkinsfile.yml
+++ b/heartbeat/Jenkinsfile.yml
@@ -67,4 +67,15 @@ stages:
       - "windows-2012"
       branches: true ## for all the branches
       tags: true ## for all the tags
+  windows-10:
+    mage: "mage build unitTest"
+    platforms: ## override default labels in this specific stage.
+      - "windows-10"
+    when: ## Override the top-level when.
+      comments:
+        - "/test heartbeat for windows-10"
+      labels:
+        - "windows-10"
+      branches: true ## for all the branches
+      tags: true ## for all the tags
 
diff --git a/metricbeat/Jenkinsfile.yml b/metricbeat/Jenkinsfile.yml
index bdd45090837..e6c4ffcef0e 100644
--- a/metricbeat/Jenkinsfile.yml
+++ b/metricbeat/Jenkinsfile.yml
@@ -62,3 +62,14 @@ stages:
       - "windows-2012"
       branches: true ## for all the branches
       tags: true ## for all the tags
+  windows-10:
+    mage: "mage build unitTest"
+    platforms: ## override default labels in this specific stage.
+      - "windows-10"
+    when: ## Override the top-level when.
+      comments:
+        - "/test metricbeat for windows-10"
+      labels:
+        - "windows-10"
+      branches: true ## for all the branches
+      tags: true ## for all the tags
diff --git a/packetbeat/Jenkinsfile.yml b/packetbeat/Jenkinsfile.yml
index adf6471b82a..ef373bb5f15 100644
--- a/packetbeat/Jenkinsfile.yml
+++ b/packetbeat/Jenkinsfile.yml
@@ -67,3 +67,14 @@ stages:
       - "windows-2012"
      branches: true ## for all the branches
       tags: true ## for all the tags
+  windows-10:
+    mage: "mage build unitTest"
+    platforms: ## override default labels in this specific stage.
+      - "windows-10"
+    when: ## Override the top-level when.
+      comments:
+        - "/test packetbeat for windows-10"
+      labels:
+        - "windows-10"
+      branches: true ## for all the branches
+      tags: true ## for all the tags
diff --git a/winlogbeat/Jenkinsfile.yml b/winlogbeat/Jenkinsfile.yml
index 94b36b0e647..3ec79093ca4 100644
--- a/winlogbeat/Jenkinsfile.yml
+++ b/winlogbeat/Jenkinsfile.yml
@@ -41,3 +41,14 @@ stages:
       - "windows-2012"
       branches: true ## for all the branches
       tags: true ## for all the tags
+  windows-10:
+    mage: "mage build unitTest"
+    platforms: ## override default labels in this specific stage.
+      - "windows-10"
+    when: ## Override the top-level when.
+      comments:
+        - "/test winlogbeat for windows-10"
+      labels:
+        - "windows-10"
+      branches: true ## for all the branches
+      tags: true ## for all the tags
diff --git a/x-pack/auditbeat/Jenkinsfile.yml b/x-pack/auditbeat/Jenkinsfile.yml
index 1294c4681b4..f4e55ea6372 100644
--- a/x-pack/auditbeat/Jenkinsfile.yml
+++ b/x-pack/auditbeat/Jenkinsfile.yml
@@ -52,7 +52,7 @@ stages:
       - "windows-2016"
     when: ## Override the top-level when.
       comments:
-        - "/test auditbeat for windows-2016"
+        - "/test x-pack/auditbeat for windows-2016"
       labels:
        - "windows-2016"
      branches: true ## for all the branches
      tags: true ## for all the tags
@@ -68,3 +68,14 @@ stages:
       - "windows-2012"
      branches: true ## for all the branches
      tags: true ## for all the tags
+  windows-10:
+    mage: "mage build unitTest"
+    platforms: ## override default labels in this specific stage.
+      - "windows-10"
+    when: ## Override the top-level when.
+ comments: + - "/test x-pack/auditbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/elastic-agent/Jenkinsfile.yml b/x-pack/elastic-agent/Jenkinsfile.yml index bf1bfed3ddd..d324e3381af 100644 --- a/x-pack/elastic-agent/Jenkinsfile.yml +++ b/x-pack/elastic-agent/Jenkinsfile.yml @@ -67,3 +67,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/elastic-agent for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/filebeat/Jenkinsfile.yml b/x-pack/filebeat/Jenkinsfile.yml index 5bd2bcd40cf..d28520b7c32 100644 --- a/x-pack/filebeat/Jenkinsfile.yml +++ b/x-pack/filebeat/Jenkinsfile.yml @@ -68,3 +68,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/filebeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/functionbeat/Jenkinsfile.yml b/x-pack/functionbeat/Jenkinsfile.yml index ecb2bd14e0e..117d92e3179 100644 --- a/x-pack/functionbeat/Jenkinsfile.yml +++ b/x-pack/functionbeat/Jenkinsfile.yml @@ -65,3 +65,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/functionbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/metricbeat/Jenkinsfile.yml b/x-pack/metricbeat/Jenkinsfile.yml index 60a593c488d..8506eb12e69 100644 --- a/x-pack/metricbeat/Jenkinsfile.yml +++ b/x-pack/metricbeat/Jenkinsfile.yml @@ -57,3 +57,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/metricbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/packetbeat/Jenkinsfile.yml b/x-pack/packetbeat/Jenkinsfile.yml index e3fa9ad0324..a3c11636dc6 100644 --- a/x-pack/packetbeat/Jenkinsfile.yml +++ b/x-pack/packetbeat/Jenkinsfile.yml @@ -24,7 +24,7 @@ stages: - "windows-2016" when: ## Override the top-level when. comments: - - "/test x-pack/winlogbeat for windows-2016" + - "/test x-pack/packetbeat for windows-2016" labels: - "windows-2016" branches: true ## for all the branches @@ -40,3 +40,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. 
+ comments: + - "/test x-pack/packetbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/winlogbeat/Jenkinsfile.yml b/x-pack/winlogbeat/Jenkinsfile.yml index 371f0aa6f48..45dfcad9d45 100644 --- a/x-pack/winlogbeat/Jenkinsfile.yml +++ b/x-pack/winlogbeat/Jenkinsfile.yml @@ -40,3 +40,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/winlogbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags
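Each windows-10 stage added above repeats the same `when` gate: run the stage when a matching trigger comment or PR label is present, and always on branch and tag builds. The actual evaluation lives in the beats Jenkins shared library (Groovy); the Go sketch below is only a hypothetical model of that gating, with every name invented for illustration:

package cigate

// when mirrors the shape of the YAML gate used by the stages above; this
// struct and shouldRun are an assumption, not the real pipeline code.
type when struct {
	comments []string // trigger comments, e.g. "/test winlogbeat for windows-10"
	labels   []string // PR labels, e.g. "windows-10"
	branches bool     // run for all branch builds
	tags     bool     // run for all tag builds
}

// shouldRun reports whether a stage is enabled for the current build.
func shouldRun(w when, prComments, prLabels []string, isBranch, isTag bool) bool {
	for _, got := range prComments {
		for _, want := range w.comments {
			if got == want {
				return true
			}
		}
	}
	for _, got := range prLabels {
		for _, want := range w.labels {
			if got == want {
				return true
			}
		}
	}
	return (w.branches && isBranch) || (w.tags && isTag)
}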