author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-26 08:15:24 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-26 08:15:35 +0000
commit     f09848204fa5283d21ea43e262ee41aa578e1808 (patch)
tree       c62385d7adf209fa6a798635954d887f718fb3fb /src/go/plugin/go.d/modules/logstash
parent     Releasing debian version 1.46.3-2. (diff)
Merging upstream version 1.47.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/go/plugin/go.d/modules/logstash')
l---------  src/go/plugin/go.d/modules/logstash/README.md                     1
-rw-r--r--  src/go/plugin/go.d/modules/logstash/charts.go                   236
-rw-r--r--  src/go/plugin/go.d/modules/logstash/collect.go                   91
-rw-r--r--  src/go/plugin/go.d/modules/logstash/config_schema.json          183
-rw-r--r--  src/go/plugin/go.d/modules/logstash/integrations/logstash.md    283
-rw-r--r--  src/go/plugin/go.d/modules/logstash/logstash.go                 114
-rw-r--r--  src/go/plugin/go.d/modules/logstash/logstash_test.go            253
-rw-r--r--  src/go/plugin/go.d/modules/logstash/metadata.yaml               274
-rw-r--r--  src/go/plugin/go.d/modules/logstash/node_stats.go                65
-rw-r--r--  src/go/plugin/go.d/modules/logstash/testdata/config.json         20
-rw-r--r--  src/go/plugin/go.d/modules/logstash/testdata/config.yaml         17
-rw-r--r--  src/go/plugin/go.d/modules/logstash/testdata/stats.json         252
12 files changed, 1789 insertions, 0 deletions
diff --git a/src/go/plugin/go.d/modules/logstash/README.md b/src/go/plugin/go.d/modules/logstash/README.md
new file mode 120000
index 00000000..7a35ae8f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/README.md
@@ -0,0 +1 @@
+integrations/logstash.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/logstash/charts.go b/src/go/plugin/go.d/modules/logstash/charts.go
new file mode 100644
index 00000000..3fed45f4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/charts.go
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logstash
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
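+// Chart priorities determine the order in which charts appear on the
+// dashboard; lower values are rendered first.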
+const (
+ prioJVMThreads = module.Priority + iota
+ prioJVMMemHeapUsed
+ prioJVMMemHeap
+ prioJVMMemPoolsEden
+ prioJVMMemPoolsSurvivor
+ prioJVMMemPoolsOld
+ prioJVMGCCollectorCount
+ prioJVMGCCollectorTime
+ prioOpenFileDescriptors
+ prioEvent
+ prioEventDuration
+ prioPipelineEvent
+ prioPipelineEventDurations
+ prioUptime
+)
+
+var charts = module.Charts{
+ // thread
+ {
+ ID: "jvm_threads",
+ Title: "JVM Threads",
+ Units: "count",
+ Fam: "threads",
+ Ctx: "logstash.jvm_threads",
+ Priority: prioJVMThreads,
+ Dims: module.Dims{
+ {ID: "jvm_threads_count", Name: "threads"},
+ },
+ },
+ // memory
+ {
+ ID: "jvm_mem_heap_used",
+ Title: "JVM Heap Memory Percentage",
+ Units: "percentage",
+ Fam: "memory",
+ Ctx: "logstash.jvm_mem_heap_used",
+ Priority: prioJVMMemHeapUsed,
+ Dims: module.Dims{
+ {ID: "jvm_mem_heap_used_percent", Name: "in use"},
+ },
+ },
+ {
+ ID: "jvm_mem_heap",
+ Title: "JVM Heap Memory",
+ Units: "KiB",
+ Fam: "memory",
+ Ctx: "logstash.jvm_mem_heap",
+ Type: module.Area,
+ Priority: prioJVMMemHeap,
+ Dims: module.Dims{
+ {ID: "jvm_mem_heap_committed_in_bytes", Name: "committed", Div: 1024},
+ {ID: "jvm_mem_heap_used_in_bytes", Name: "used", Div: 1024},
+ },
+ },
+ {
+ ID: "jvm_mem_pools_eden",
+ Title: "JVM Pool Eden Memory",
+ Units: "KiB",
+ Fam: "memory",
+ Ctx: "logstash.jvm_mem_pools_eden",
+ Type: module.Area,
+ Priority: prioJVMMemPoolsEden,
+ Dims: module.Dims{
+ {ID: "jvm_mem_pools_eden_committed_in_bytes", Name: "committed", Div: 1024},
+ {ID: "jvm_mem_pools_eden_used_in_bytes", Name: "used", Div: 1024},
+ },
+ },
+ {
+ ID: "jvm_mem_pools_survivor",
+ Title: "JVM Pool Survivor Memory",
+ Units: "KiB",
+ Fam: "memory",
+ Ctx: "logstash.jvm_mem_pools_survivor",
+ Type: module.Area,
+ Priority: prioJVMMemPoolsSurvivor,
+ Dims: module.Dims{
+ {ID: "jvm_mem_pools_survivor_committed_in_bytes", Name: "committed", Div: 1024},
+ {ID: "jvm_mem_pools_survivor_used_in_bytes", Name: "used", Div: 1024},
+ },
+ },
+ {
+ ID: "jvm_mem_pools_old",
+ Title: "JVM Pool Old Memory",
+ Units: "KiB",
+ Fam: "memory",
+ Ctx: "logstash.jvm_mem_pools_old",
+ Type: module.Area,
+ Priority: prioJVMMemPoolsOld,
+ Dims: module.Dims{
+ {ID: "jvm_mem_pools_old_committed_in_bytes", Name: "committed", Div: 1024},
+ {ID: "jvm_mem_pools_old_used_in_bytes", Name: "used", Div: 1024},
+ },
+ },
+ // garbage collection
+ {
+ ID: "jvm_gc_collector_count",
+ Title: "Garbage Collection Count",
+ Units: "counts/s",
+ Fam: "garbage collection",
+ Ctx: "logstash.jvm_gc_collector_count",
+ Priority: prioJVMGCCollectorCount,
+ Dims: module.Dims{
+ {ID: "jvm_gc_collectors_eden_collection_count", Name: "eden", Algo: module.Incremental},
+ {ID: "jvm_gc_collectors_old_collection_count", Name: "old", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "jvm_gc_collector_time",
+ Title: "Time Spent On Garbage Collection",
+ Units: "ms",
+ Fam: "garbage collection",
+ Ctx: "logstash.jvm_gc_collector_time",
+ Priority: prioJVMGCCollectorTime,
+ Dims: module.Dims{
+ {ID: "jvm_gc_collectors_eden_collection_time_in_millis", Name: "eden", Algo: module.Incremental},
+ {ID: "jvm_gc_collectors_old_collection_time_in_millis", Name: "old", Algo: module.Incremental},
+ },
+ },
+ // processes
+ {
+ ID: "open_file_descriptors",
+ Title: "Open File Descriptors",
+ Units: "fd",
+ Fam: "processes",
+ Ctx: "logstash.open_file_descriptors",
+ Priority: prioOpenFileDescriptors,
+ Dims: module.Dims{
+ {ID: "process_open_file_descriptors", Name: "open"},
+ },
+ },
+ // events
+ {
+ ID: "event",
+ Title: "Events Overview",
+ Units: "events/s",
+ Fam: "events",
+ Ctx: "logstash.event",
+ Priority: prioEvent,
+ Dims: module.Dims{
+ {ID: "event_in", Name: "in", Algo: module.Incremental},
+ {ID: "event_filtered", Name: "filtered", Algo: module.Incremental},
+ {ID: "event_out", Name: "out", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "event_duration",
+ Title: "Events Duration",
+ Units: "seconds",
+ Fam: "events",
+ Ctx: "logstash.event_duration",
+ Priority: prioEventDuration,
+ Dims: module.Dims{
+ {ID: "event_duration_in_millis", Name: "event", Div: 1000, Algo: module.Incremental},
+ {ID: "event_queue_push_duration_in_millis", Name: "queue", Div: 1000, Algo: module.Incremental},
+ },
+ },
+ // uptime
+ {
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "logstash.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "jvm_uptime_in_millis", Name: "uptime", Div: 1000},
+ },
+ },
+}
+
+var pipelineChartsTmpl = module.Charts{
+ {
+ ID: "pipeline_%s_event",
+ Title: "Pipeline Events",
+ Units: "events/s",
+ Fam: "pipeline events",
+ Ctx: "logstash.pipeline_event",
+ Priority: prioPipelineEvent,
+ Dims: module.Dims{
+ {ID: "pipelines_%s_event_in", Name: "in", Algo: module.Incremental},
+ {ID: "pipelines_%s_event_filtered", Name: "filtered", Algo: module.Incremental},
+ {ID: "pipelines_%s_event_out", Name: "out", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "pipeline_%s_event_duration",
+ Title: "Pipeline Events Duration",
+ Units: "seconds",
+ Fam: "pipeline events duration",
+ Ctx: "logstash.pipeline_event_duration",
+ Priority: prioPipelineEventDurations,
+ Dims: module.Dims{
+ {ID: "pipelines_%s_event_duration_in_millis", Name: "event", Div: 1000, Algo: module.Incremental},
+ {ID: "pipelines_%s_event_queue_push_duration_in_millis", Name: "queue", Div: 1000, Algo: module.Incremental},
+ },
+ },
+}
+
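+// addPipelineCharts instantiates the pipeline chart templates for a pipeline:
+// the %s placeholders in chart and dimension IDs are filled with the pipeline
+// id, and a "pipeline" label is attached to every chart.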
+func (l *Logstash) addPipelineCharts(id string) {
+ charts := pipelineChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, id)
+ chart.Labels = []module.Label{
+ {Key: "pipeline", Value: id},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, id)
+ }
+ }
+
+ if err := l.Charts().Add(*charts...); err != nil {
+ l.Warning(err)
+ }
+}
+
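+// removePipelineCharts marks every chart whose ID starts with "pipeline_<id>"
+// as removed so it is no longer collected or displayed.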
+func (l *Logstash) removePipelineCharts(id string) {
+ for _, chart := range *l.Charts() {
+ if strings.HasPrefix(chart.ID, "pipeline_"+id) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/logstash/collect.go b/src/go/plugin/go.d/modules/logstash/collect.go
new file mode 100644
index 00000000..ff506d64
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/collect.go
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logstash
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const urlPathNodeStatsAPI = "/_node/stats"
+
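+// collect queries the node stats API, reconciles the pipeline charts with the
+// pipelines present in the response, and returns the flattened metrics.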
+func (l *Logstash) collect() (map[string]int64, error) {
+ stats, err := l.queryNodeStats()
+ if err != nil {
+ return nil, err
+ }
+
+ l.updateCharts(stats.Pipelines)
+
+ return stm.ToMap(stats), nil
+}
+
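+// updateCharts adds charts for newly discovered pipelines and removes the
+// charts of pipelines that no longer appear in the node stats response.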
+func (l *Logstash) updateCharts(pipelines map[string]pipelineStats) {
+ seen := make(map[string]bool)
+
+ for id := range pipelines {
+ seen[id] = true
+ if !l.pipelines[id] {
+ l.pipelines[id] = true
+ l.addPipelineCharts(id)
+ }
+ }
+
+ for id := range l.pipelines {
+ if !seen[id] {
+ delete(l.pipelines, id)
+ l.removePipelineCharts(id)
+ }
+ }
+}
+
+func (l *Logstash) queryNodeStats() (*nodeStats, error) {
+ req, err := web.NewHTTPRequestWithPath(l.Request, urlPathNodeStatsAPI)
+ if err != nil {
+ return nil, err
+ }
+
+ var stats nodeStats
+
+ if err := l.doWithDecode(&stats, req); err != nil {
+ return nil, err
+ }
+
+ return &stats, nil
+}
+
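+// doWithDecode executes the request, requires a 200 OK response, and
+// JSON-decodes the response body into dst.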
+func (l *Logstash) doWithDecode(dst interface{}, req *http.Request) error {
+ l.Debugf("executing %s '%s'", req.Method, req.URL)
+ resp, err := l.httpClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("%s returned %d status code (%s)", req.URL, resp.StatusCode, resp.Status)
+ }
+
+ content, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("error on reading response from %s : %v", req.URL, err)
+ }
+
+ if err := json.Unmarshal(content, dst); err != nil {
+ return fmt.Errorf("error on parsing response from %s : %v", req.URL, err)
+ }
+
+ return nil
+}
+
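+// closeBody drains and closes the response body so the underlying connection
+// can be reused by the HTTP client.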
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/logstash/config_schema.json b/src/go/plugin/go.d/modules/logstash/config_schema.json
new file mode 100644
index 00000000..c08d136f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Logstash collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Logstash [monitoring API](https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html#monitoring).",
+ "type": "string",
+ "default": "http://localhost:9600",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/logstash/integrations/logstash.md b/src/go/plugin/go.d/modules/logstash/integrations/logstash.md
new file mode 100644
index 00000000..0ca751eb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/integrations/logstash.md
@@ -0,0 +1,283 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/logstash/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/logstash/metadata.yaml"
+sidebar_label: "Logstash"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Logs Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Logstash
+
+
+<img src="https://netdata.cloud/img/elastic-logstash.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: logstash
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Logstash instances.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
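+
+Metrics are read from the Logstash [node stats API](https://www.elastic.co/guide/en/logstash/current/node-stats-api.html). As a quick sanity check that the endpoint is reachable (a sketch, assuming the default `http://localhost:9600` address), you can query it directly:
+
+```bash
+curl http://localhost:9600/_node/stats
+```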
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Logstash instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| logstash.jvm_threads | threads | count |
+| logstash.jvm_mem_heap_used | in_use | percentage |
+| logstash.jvm_mem_heap | committed, used | KiB |
+| logstash.jvm_mem_pools_eden | committed, used | KiB |
+| logstash.jvm_mem_pools_survivor | committed, used | KiB |
+| logstash.jvm_mem_pools_old | committed, used | KiB |
+| logstash.jvm_gc_collector_count | eden, old | counts/s |
+| logstash.jvm_gc_collector_time | eden, old | ms |
+| logstash.open_file_descriptors | open | fd |
+| logstash.event | in, filtered, out | events/s |
+| logstash.event_duration | event, queue | seconds |
+| logstash.uptime | uptime | seconds |
+
+### Per pipeline
+
+These metrics refer to the pipeline.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| pipeline | pipeline name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| logstash.pipeline_event | in, filtered, out | events/s |
+| logstash.pipeline_event_duration | event, queue | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/logstash.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/logstash.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://localhost:9600 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://localhost:9600
+
+```
+</details>
+
+##### HTTP authentication
+
+HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://localhost:9600
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://localhost:9600
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://localhost:9600
+
+ - name: remote
+ url: http://192.0.2.1:9600
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `logstash` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m logstash
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `logstash` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep logstash
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep logstash /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep logstash
+```
+
+
diff --git a/src/go/plugin/go.d/modules/logstash/logstash.go b/src/go/plugin/go.d/modules/logstash/logstash.go
new file mode 100644
index 00000000..3ee95594
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/logstash.go
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logstash
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
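+// init registers the collector with the go.d agent so that jobs with module
+// type "logstash" are created through this factory.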
+func init() {
+ module.Register("logstash", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Logstash {
+ return &Logstash{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://localhost:9600",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: charts.Copy(),
+ pipelines: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Logstash struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+
+ pipelines map[string]bool
+}
+
+func (l *Logstash) Configuration() any {
+ return l.Config
+}
+
+func (l *Logstash) Init() error {
+ if l.URL == "" {
+ l.Error("config validation: 'url' cannot be empty")
+ return errors.New("url not set")
+ }
+
+ httpClient, err := web.NewHTTPClient(l.Client)
+ if err != nil {
+ l.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ l.httpClient = httpClient
+
+ l.Debugf("using URL %s", l.URL)
+ l.Debugf("using timeout: %s", l.Timeout.Duration())
+
+ return nil
+}
+
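+// Check performs a trial collection and returns an error if it yields no metrics.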
+func (l *Logstash) Check() error {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (l *Logstash) Charts() *module.Charts {
+ return l.charts
+}
+
+func (l *Logstash) Collect() map[string]int64 {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (l *Logstash) Cleanup() {
+ if l.httpClient != nil {
+ l.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/logstash/logstash_test.go b/src/go/plugin/go.d/modules/logstash/logstash_test.go
new file mode 100644
index 00000000..166d3981
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/logstash_test.go
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logstash
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataNodeStatsMetrics, _ = os.ReadFile("testdata/stats.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataNodeStatsMetrics": dataNodeStatsMetrics,
+ } {
+ require.NotNilf(t, data, name)
+ }
+}
+
+func TestLogstash_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Logstash{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestLogstash_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ls := New()
+ ls.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, ls.Init())
+ } else {
+ assert.NoError(t, ls.Init())
+ }
+ })
+ }
+}
+
+func TestLogstash_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestLogstash_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestLogstash_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (ls *Logstash, cleanup func())
+ }{
+ "success on valid response": {
+ wantFail: false,
+ prepare: caseValidResponse,
+ },
+ "fail on invalid data response": {
+ wantFail: true,
+ prepare: caseInvalidDataResponse,
+ },
+ "fail on connection refused": {
+ wantFail: true,
+ prepare: caseConnectionRefused,
+ },
+ "fail on 404 response": {
+ wantFail: true,
+ prepare: case404,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ls, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, ls.Check())
+ } else {
+ assert.NoError(t, ls.Check())
+ }
+ })
+ }
+}
+
+func TestLogstash_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (ls *Logstash, cleanup func())
+ wantNumOfCharts int
+ wantMetrics map[string]int64
+ }{
+ "success on valid response": {
+ prepare: caseValidResponse,
+ wantNumOfCharts: len(charts) + len(pipelineChartsTmpl),
+ wantMetrics: map[string]int64{
+ "event_duration_in_millis": 0,
+ "event_filtered": 0,
+ "event_in": 0,
+ "event_out": 0,
+ "event_queue_push_duration_in_millis": 0,
+ "jvm_gc_collectors_eden_collection_count": 5796,
+ "jvm_gc_collectors_eden_collection_time_in_millis": 45008,
+ "jvm_gc_collectors_old_collection_count": 7,
+ "jvm_gc_collectors_old_collection_time_in_millis": 3263,
+ "jvm_mem_heap_committed_in_bytes": 528154624,
+ "jvm_mem_heap_used_in_bytes": 189973480,
+ "jvm_mem_heap_used_percent": 35,
+ "jvm_mem_pools_eden_committed_in_bytes": 69795840,
+ "jvm_mem_pools_eden_used_in_bytes": 2600120,
+ "jvm_mem_pools_old_committed_in_bytes": 449642496,
+ "jvm_mem_pools_old_used_in_bytes": 185944824,
+ "jvm_mem_pools_survivor_committed_in_bytes": 8716288,
+ "jvm_mem_pools_survivor_used_in_bytes": 1428536,
+ "jvm_threads_count": 28,
+ "jvm_uptime_in_millis": 699809475,
+ "pipelines_pipeline-1_event_duration_in_millis": 5027018,
+ "pipelines_pipeline-1_event_filtered": 567639,
+ "pipelines_pipeline-1_event_in": 567639,
+ "pipelines_pipeline-1_event_out": 567639,
+ "pipelines_pipeline-1_event_queue_push_duration_in_millis": 84241,
+ "process_open_file_descriptors": 101,
+ },
+ },
+ "fail on invalid data response": {
+ prepare: caseInvalidDataResponse,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ "fail on connection refused": {
+ prepare: caseConnectionRefused,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ "fail on 404 response": {
+ prepare: case404,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ls, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := ls.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ assert.Equal(t, test.wantNumOfCharts, len(*ls.Charts()))
+ ensureCollectedHasAllChartsDimsVarsIDs(t, ls, mx)
+ }
+ })
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, ls *Logstash, mx map[string]int64) {
+ for _, chart := range *ls.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func caseValidResponse(t *testing.T) (*Logstash, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathNodeStatsAPI:
+ _, _ = w.Write(dataNodeStatsMetrics)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+ ls := New()
+ ls.URL = srv.URL
+ require.NoError(t, ls.Init())
+
+ return ls, srv.Close
+}
+
+func caseInvalidDataResponse(t *testing.T) (*Logstash, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ ls := New()
+ ls.URL = srv.URL
+ require.NoError(t, ls.Init())
+
+ return ls, srv.Close
+}
+
+func caseConnectionRefused(t *testing.T) (*Logstash, func()) {
+ t.Helper()
+ ls := New()
+ ls.URL = "http://127.0.0.1:65001"
+ require.NoError(t, ls.Init())
+
+ return ls, func() {}
+}
+
+func case404(t *testing.T) (*Logstash, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ ls := New()
+ ls.URL = srv.URL
+ require.NoError(t, ls.Init())
+
+ return ls, srv.Close
+}
diff --git a/src/go/plugin/go.d/modules/logstash/metadata.yaml b/src/go/plugin/go.d/modules/logstash/metadata.yaml
new file mode 100644
index 00000000..00d92db2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/metadata.yaml
@@ -0,0 +1,274 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-logstash
+ plugin_name: go.d.plugin
+ module_name: logstash
+ monitored_instance:
+ name: Logstash
+ link: https://www.elastic.co/products/logstash
+ icon_filename: elastic-logstash.svg
+ categories:
+ - data-collection.logs-servers
+ keywords:
+ - logstash
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Logstash instances.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/logstash.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://localhost:9600
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://localhost:9600
+ - name: HTTP authentication
+ description: HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://localhost:9600
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://localhost:9600
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://localhost:9600
+
+ - name: remote
+ url: http://192.0.2.1:9600
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: logstash.jvm_threads
+ description: JVM Threads
+ unit: count
+ chart_type: line
+ dimensions:
+ - name: threads
+ - name: logstash.jvm_mem_heap_used
+ description: JVM Heap Memory Percentage
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: in_use
+ - name: logstash.jvm_mem_heap
+ description: JVM Heap Memory
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: logstash.jvm_mem_pools_eden
+ description: JVM Pool Eden Memory
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: logstash.jvm_mem_pools_survivor
+ description: JVM Pool Survivor Memory
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: logstash.jvm_mem_pools_old
+ description: JVM Pool Old Memory
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: logstash.jvm_gc_collector_count
+ description: Garbage Collection Count
+ unit: counts/s
+ chart_type: line
+ dimensions:
+ - name: eden
+ - name: old
+ - name: logstash.jvm_gc_collector_time
+ description: Time Spent On Garbage Collection
+ unit: ms
+ chart_type: line
+ dimensions:
+ - name: eden
+ - name: old
+ - name: logstash.open_file_descriptors
+ description: Open File Descriptors
+ unit: fd
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: logstash.event
+ description: Events Overview
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: in
+ - name: filtered
+ - name: out
+ - name: logstash.event_duration
+ description: Events Duration
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: event
+ - name: queue
+ - name: logstash.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: pipeline
+ description: These metrics refer to the pipeline.
+ labels:
+ - name: pipeline
+ description: pipeline name
+ metrics:
+ - name: logstash.pipeline_event
+ description: Pipeline Events
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: in
+ - name: filtered
+ - name: out
+ - name: logstash.pipeline_event_duration
+ description: Pipeline Events Duration
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: event
+ - name: queue
diff --git a/src/go/plugin/go.d/modules/logstash/node_stats.go b/src/go/plugin/go.d/modules/logstash/node_stats.go
new file mode 100644
index 00000000..1687f333
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/node_stats.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logstash
+
+// https://www.elastic.co/guide/en/logstash/current/node-stats-api.html
+
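+// The stm tags control how decoded values are flattened into metric keys:
+// nested tags are joined with "_", producing keys such as
+// "jvm_mem_pools_eden_used_in_bytes".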
+type nodeStats struct {
+ JVM jvmStats `json:"jvm" stm:"jvm"`
+ Process processStats `json:"process" stm:"process"`
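+ // Note: the node stats API nests these counters under "events"; with the
+ // "event" json tag they decode to zero values (the testdata expectations
+ // reflect this).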
+ Event eventsStats `json:"event" stm:"event"`
+ Pipelines map[string]pipelineStats `json:"pipelines" stm:"pipelines"`
+}
+
+type pipelineStats struct {
+ Event eventsStats `json:"events" stm:"event"`
+}
+
+type eventsStats struct {
+ In int `json:"in" stm:"in"`
+ Filtered int `json:"filtered" stm:"filtered"`
+ Out int `json:"out" stm:"out"`
+ DurationInMillis int `json:"duration_in_millis" stm:"duration_in_millis"`
+ QueuePushDurationInMillis int `json:"queue_push_duration_in_millis" stm:"queue_push_duration_in_millis"`
+}
+
+type processStats struct {
+ OpenFileDescriptors int `json:"open_file_descriptors" stm:"open_file_descriptors"`
+}
+
+type jvmStats struct {
+ Threads struct {
+ Count int `stm:"count"`
+ } `stm:"threads"`
+ Mem jvmMemStats `stm:"mem"`
+ GC jvmGCStats `stm:"gc"`
+ UptimeInMillis int `json:"uptime_in_millis" stm:"uptime_in_millis"`
+}
+
+type jvmMemStats struct {
+ HeapUsedPercent int `json:"heap_used_percent" stm:"heap_used_percent"`
+ HeapCommittedInBytes int `json:"heap_committed_in_bytes" stm:"heap_committed_in_bytes"`
+ HeapUsedInBytes int `json:"heap_used_in_bytes" stm:"heap_used_in_bytes"`
+ Pools struct {
+ Survivor jvmPoolStats `stm:"survivor"`
+ Old jvmPoolStats `stm:"old"`
+ Young jvmPoolStats `stm:"eden"`
+ } `stm:"pools"`
+}
+
+type jvmPoolStats struct {
+ UsedInBytes int `json:"used_in_bytes" stm:"used_in_bytes"`
+ CommittedInBytes int `json:"committed_in_bytes" stm:"committed_in_bytes"`
+}
+
+type jvmGCStats struct {
+ Collectors struct {
+ Old gcCollectorStats `stm:"old"`
+ Young gcCollectorStats `stm:"eden"`
+ } `stm:"collectors"`
+}
+
+type gcCollectorStats struct {
+ CollectionTimeInMillis int `json:"collection_time_in_millis" stm:"collection_time_in_millis"`
+ CollectionCount int `json:"collection_count" stm:"collection_count"`
+}
diff --git a/src/go/plugin/go.d/modules/logstash/testdata/config.json b/src/go/plugin/go.d/modules/logstash/testdata/config.json
new file mode 100644
index 00000000..984c3ed6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/logstash/testdata/config.yaml b/src/go/plugin/go.d/modules/logstash/testdata/config.yaml
new file mode 100644
index 00000000..8558b61c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/logstash/testdata/stats.json b/src/go/plugin/go.d/modules/logstash/testdata/stats.json
new file mode 100644
index 00000000..50fd7b07
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/testdata/stats.json
@@ -0,0 +1,252 @@
+{
+ "host" : "<replaced>",
+ "version" : "7.3.0",
+ "http_address" : "0.0.0.0:9600",
+ "id" : "<replaced>",
+ "name" : "<replaced>",
+ "ephemeral_id" : "339d4ddb-8a6e-4ddc-b843-efd4abf4bf73",
+ "status" : "green",
+ "snapshot" : false,
+ "pipeline" : {
+ "workers" : 1,
+ "batch_size" : 125,
+ "batch_delay" : 50
+ },
+ "jvm" : {
+ "threads" : {
+ "count" : 28,
+ "peak_count" : 32
+ },
+ "mem" : {
+ "heap_used_percent" : 35,
+ "heap_committed_in_bytes" : 528154624,
+ "heap_max_in_bytes" : 528154624,
+ "heap_used_in_bytes" : 189973480,
+ "non_heap_used_in_bytes" : 178053280,
+ "non_heap_committed_in_bytes" : 235200512,
+ "pools" : {
+ "young" : {
+ "committed_in_bytes" : 69795840,
+ "peak_max_in_bytes" : 69795840,
+ "max_in_bytes" : 69795840,
+ "peak_used_in_bytes" : 69795840,
+ "used_in_bytes" : 2600120
+ },
+ "old" : {
+ "committed_in_bytes" : 449642496,
+ "peak_max_in_bytes" : 449642496,
+ "max_in_bytes" : 449642496,
+ "peak_used_in_bytes" : 185944824,
+ "used_in_bytes" : 185944824
+ },
+ "survivor" : {
+ "committed_in_bytes" : 8716288,
+ "peak_max_in_bytes" : 8716288,
+ "max_in_bytes" : 8716288,
+ "peak_used_in_bytes" : 8716288,
+ "used_in_bytes" : 1428536
+ }
+ }
+ },
+ "gc" : {
+ "collectors" : {
+ "young" : {
+ "collection_count" : 5796,
+ "collection_time_in_millis" : 45008
+ },
+ "old" : {
+ "collection_count" : 7,
+ "collection_time_in_millis" : 3263
+ }
+ }
+ },
+ "uptime_in_millis" : 699809475
+ },
+ "process" : {
+ "open_file_descriptors" : 101,
+ "peak_open_file_descriptors" : 105,
+ "max_file_descriptors" : 1048576,
+ "mem" : {
+ "total_virtual_in_bytes" : 5074657280
+ },
+ "cpu" : {
+ "total_in_millis" : 7304550,
+ "percent" : 0,
+ "load_average" : {
+ "1m" : 0.73,
+ "5m" : 1.13,
+ "15m" : 1.06
+ }
+ }
+ },
+ "events" : {
+ "in" : 567639,
+ "filtered" : 567639,
+ "out" : 567639,
+ "duration_in_millis" : 5027018,
+ "queue_push_duration_in_millis" : 84241
+ },
+ "pipelines" : {
+ "pipeline-1" : {
+ "events" : {
+ "queue_push_duration_in_millis" : 84241,
+ "filtered" : 567639,
+ "duration_in_millis" : 5027018,
+ "in" : 567639,
+ "out" : 567639
+ },
+ "plugins" : {
+ "inputs" : [ {
+ "id" : "kafka input",
+ "events" : {
+ "queue_push_duration_in_millis" : 84241,
+ "out" : 567639
+ },
+ "name" : "kafka"
+ } ],
+ "codecs" : [ {
+ "id" : "json_9562e6c4-7a1a-4c18-919f-f012e58923dd",
+ "decode" : {
+ "writes_in" : 567639,
+ "duration_in_millis" : 86778,
+ "out" : 567639
+ },
+ "name" : "json",
+ "encode" : {
+ "writes_in" : 0,
+ "duration_in_millis" : 0
+ }
+ }, {
+ "id" : "plain_13e28721-e681-43ec-aa2c-c0a4d856b9ed",
+ "decode" : {
+ "writes_in" : 0,
+ "duration_in_millis" : 0,
+ "out" : 0
+ },
+ "name" : "plain",
+ "encode" : {
+ "writes_in" : 0,
+ "duration_in_millis" : 0
+ }
+ } ],
+ "filters" : [ {
+ "id" : "set default timezone",
+ "events" : {
+ "duration_in_millis" : 340,
+ "in" : 326901,
+ "out" : 326901
+ },
+ "name" : "mutate"
+ }, {
+ "id" : "assign index (filebeat)",
+ "events" : {
+ "duration_in_millis" : 858,
+ "in" : 567639,
+ "out" : 567639
+ },
+ "name" : "mutate"
+ }, {
+ "id" : "parse JSON",
+ "events" : {
+ "duration_in_millis" : 112,
+ "in" : 0,
+ "out" : 0
+ },
+ "name" : "json"
+ }, {
+ "id" : "parse LTSV",
+ "events" : {
+ "duration_in_millis" : 130,
+ "in" : 0,
+ "out" : 0
+ },
+ "name" : "kv"
+ }, {
+ "id" : "assign document_id",
+ "events" : {
+ "duration_in_millis" : 2406,
+ "in" : 567639,
+ "out" : 567639
+ },
+ "name" : "fingerprint"
+ }, {
+ "id" : "assign index (fluentd)",
+ "events" : {
+ "duration_in_millis" : 140,
+ "in" : 0,
+ "out" : 0
+ },
+ "name" : "mutate"
+ }, {
+ "id" : "parse timestamp",
+ "events" : {
+ "duration_in_millis" : 7261,
+ "in" : 326901,
+ "out" : 326901
+ },
+ "name" : "date",
+ "failures" : 1,
+ "matches" : 326900
+ } ],
+ "outputs" : [ {
+ "id" : "0f72afb28c5ff3a3897d87b04fc1b0a5fe8358cb55bbc29b995056fd868e612b",
+ "events" : {
+ "duration_in_millis" : 4063485,
+ "in" : 567639,
+ "out" : 567639
+ },
+ "name" : "elasticsearch",
+ "documents" : {
+ "successes" : 567639
+ },
+ "bulk_requests" : {
+ "responses" : {
+ "200" : 50735
+ },
+ "successes" : 50735
+ }
+ } ]
+ },
+ "reloads" : {
+ "last_error" : null,
+ "last_failure_timestamp" : null,
+ "last_success_timestamp" : null,
+ "failures" : 0,
+ "successes" : 0
+ },
+ "queue" : {
+ "type" : "persisted",
+ "events_count" : 0,
+ "queue_size_in_bytes" : 45085456,
+ "max_queue_size_in_bytes" : 1073741824
+ },
+ "hash" : "46f5c757f55a52d08ed841e9f51698653cf228ff9be41b7372f20a1b699bf129",
+ "ephemeral_id" : "c43b3a8e-882c-4e3a-a2f2-8515a5ef4ecc"
+ }
+ },
+ "reloads" : {
+ "failures" : 0,
+ "successes" : 0
+ },
+ "os" : {
+ "cgroup" : {
+ "cpuacct" : {
+ "control_group" : "/",
+ "usage_nanos" : 7304416115351
+ },
+ "cpu" : {
+ "control_group" : "/",
+ "cfs_quota_micros" : 100000,
+ "cfs_period_micros" : 100000,
+ "stat" : {
+ "time_throttled_nanos" : 124716913549,
+ "number_of_elapsed_periods" : 5875889,
+ "number_of_times_throttled" : 1219
+ }
+ }
+ }
+ },
+ "queue" : {
+ "events_count" : 0
+ }
+}
\ No newline at end of file