summaryrefslogtreecommitdiffstats
path: root/exporting/prometheus/metadata.yaml
blob: 906d8ea369b1cf4dc9307bb5ad3c60f14318bc32 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
# yamllint disable rule:line-length
---
- &promexport
  id: 'export-prometheus-remote'
  meta: &meta
    name: 'Prometheus Remote Write'
    link: 'https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage'
    categories:
      - export
    icon_filename: 'prometheus.svg'
  keywords:
    - exporter
    - Prometheus
    - remote write
    - time series
  overview:
    exporter_description: |
      Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
    exporter_limitations: 'The remote write exporting connector does not support buffer on failures.'
  setup:
    prerequisites:
      list:
        - title: ''
          description: |
            - Netdata and the external storage provider of your choice, installed, configured and operational.
            - `protobuf` and `snappy` libraries installed.
            - Netdata reinstalled after the libraries.
    configuration:
      file:
        name: 'exporting.conf'
      options:
        description: 'The following options can be defined for this exporter.'
        folding:
          title: 'Config options'
          enabled: true
        list:
          - name: 'enabled'
            default_value: 'no'
            description: 'Enables or disables an exporting connector instance (yes|no).'
            required: true
          - name: 'destination'
            default_value: 'no'
            description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
            required: true
            detailed_description: |
              The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
              - PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
              - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
              - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.

              Example IPv4:
              ```yaml
              destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
              ```
              Example IPv6 and IPv4 together:
              ```yaml
              destination = [ffff:...:0001]:2003 10.11.12.1:2003
              ```
              When multiple servers are defined, Netdata will try the next one when the previous one fails.
          - name: 'username'
            default_value: 'my_username'
            description: 'Username for HTTP authentication'
            required: false
          - name: 'password'
            default_value: 'my_password'
            description: 'Password for HTTP authentication'
            required: false
          - name: 'data source'
            default_value: ''
            description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
            required: false
          - name: 'hostname'
            default_value: '[global].hostname'
            description: 'The hostname to be used for sending data to the external database server.'
            required: false
          - name: 'prefix'
            default_value: 'netdata'
            description: 'The prefix to add to all metrics.'
            required: false
          - name: 'update every'
            default_value: '10'
            description: |
              Frequency of sending data to the external database, in seconds.
            required: false
            detailed_description: |
              Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
              send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
          - name: 'buffer on failures'
            default_value: '10'
            description: |
              The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
            required: false
            detailed_description: |
              If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
          - name: 'timeout ms'
            default_value: '20000'
            description: 'The timeout in milliseconds to wait for the external database server to process the data.'
            required: false
          - name: 'send hosts matching'
            default_value: 'localhost *'
            description: |
              Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns).
            required: false
            detailed_description: |
              Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
              The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
              filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.

              A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
              use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
          - name: 'send charts matching'
            default_value: '*'
            description: |
              One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
            required: false
            detailed_description: |
              A pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,
              use `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,
              positive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter
              has a higher priority than the configuration option.
          - name: 'send names instead of ids'
            default_value: ''
            description: 'Controls the metric names Netdata should send to the external database (yes|no).'
            required: false
            detailed_description: |
              Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
              are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
              different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
          - name: 'send configured labels'
            default_value: ''
            description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
            required: false
          - name: 'send automatic labels'
            default_value: ''
            description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
            required: false
      examples:
        folding:
          enabled: true
          title: ''
        list:
          - name: 'Example configuration'
            folding:
              enabled: false
            description: 'Basic example configuration for Prometheus remote write.'
            config: |
              [prometheus_remote_write:my_instance]
                  enabled = yes
                  destination = 10.11.14.2:2003
                  remote write URL path = /receive
          - name: 'Example configuration with HTTPS and HTTP authentication'
            folding:
              enabled: false
            description: 'Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.'
            config: |
              [prometheus_remote_write:https:my_instance]
                  enabled = yes
                  destination = 10.11.14.2:2003
                  remote write URL path = /receive
                  username = my_username
                  password = my_password
- <<: *promexport
  id: 'export-appoptics'
  meta:
    <<: *meta
    name: AppOptics
    link: https://www.solarwinds.com/appoptics
    icon_filename: 'solarwinds.svg'
    keywords:
      - app optics
      - AppOptics
      - Solarwinds
- <<: *promexport
  id: 'export-azure-data'
  meta:
    <<: *meta
    name: Azure Data Explorer
    link: https://azure.microsoft.com/en-us/pricing/details/data-explorer/
    icon_filename: 'azuredataex.jpg'
    keywords:
      - Azure Data Explorer
      - Azure
- <<: *promexport
  id: 'export-azure-event'
  meta:
    <<: *meta
    name: Azure Event Hub
    link: https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-about
    icon_filename: 'azureeventhub.png'
    keywords:
      - Azure Event Hub
      - Azure
- <<: *promexport
  id: 'export-newrelic'
  meta:
    <<: *meta
    name: New Relic
    link: https://newrelic.com/
    icon_filename: 'newrelic.svg'
    keywords:
      - export
      - NewRelic
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-quasar'
  meta:
    <<: *meta
    name: QuasarDB
    link: https://doc.quasar.ai/master/
    icon_filename: 'quasar.jpeg'
    keywords:
      - export
      - quasar
      - quasarDB
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-splunk'
  meta:
    <<: *meta
    name: Splunk SignalFx
    link: https://www.splunk.com/en_us/products/observability.html
    icon_filename: 'splunk.svg'
    keywords:
      - export
      - splunk
      - signalfx
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-tikv'
  meta:
    <<: *meta
    name: TiKV
    link: https://tikv.org/
    icon_filename: 'tikv.png'
    keywords:
      - export
      - TiKV
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-thanos'
  meta:
    <<: *meta
    name: Thanos
    link: https://thanos.io/
    icon_filename: 'thanos.png'
    keywords:
      - export
      - thanos
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-victoria'
  meta:
    <<: *meta
    name: VictoriaMetrics
    link: https://victoriametrics.com/products/open-source/
    icon_filename: 'victoriametrics.png'
    keywords:
      - export
      - victoriametrics
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-vmware'
  meta:
    <<: *meta
    name: VMware Aria
    link: https://www.vmware.com/products/aria-operations-for-applications.html
    icon_filename: 'aria.png'
    keywords:
      - export
      - VMware
      - Aria
      - Tanzu
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-chronix'
  meta:
    <<: *meta
    name: Chronix
    link: https://dbdb.io/db/chronix
    icon_filename: 'chronix.png'
    keywords:
      - export
      - chronix
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-cortex'
  meta:
    <<: *meta
    name: Cortex
    link: https://cortexmetrics.io/
    icon_filename: 'cortex.png'
    keywords:
      - export
      - cortex
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-crate'
  meta:
    <<: *meta
    name: CrateDB
    link: https://crate.io/
    icon_filename: 'crate.svg'
    keywords:
      - export
      - CrateDB
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-elastic'
  meta:
    <<: *meta
    name: ElasticSearch
    link: https://www.elastic.co/
    icon_filename: 'elasticsearch.svg'
    keywords:
      - export
      - ElasticSearch
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-gnocchi'
  meta:
    <<: *meta
    name: Gnocchi
    link: https://wiki.openstack.org/wiki/Gnocchi
    icon_filename: 'gnocchi.svg'
    keywords:
      - export
      - Gnocchi
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-bigquery'
  meta:
    <<: *meta
    name: Google BigQuery
    link: https://cloud.google.com/bigquery/
    icon_filename: 'bigquery.png'
    keywords:
      - export
      - Google BigQuery
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-irondb'
  meta:
    <<: *meta
    name: IRONdb
    link: https://docs.circonus.com/irondb/
    icon_filename: 'irondb.png'
    keywords:
      - export
      - IRONdb
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-kafka'
  meta:
    <<: *meta
    name: Kafka
    link: https://kafka.apache.org/
    icon_filename: 'kafka.svg'
    keywords:
      - export
      - Kafka
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-m3db'
  meta:
    <<: *meta
    name: M3DB
    link: https://m3db.io/
    icon_filename: 'm3db.png'
    keywords:
      - export
      - M3DB
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-metricfire'
  meta:
    <<: *meta
    name: MetricFire
    link: https://www.metricfire.com/
    icon_filename: 'metricfire.png'
    keywords:
      - export
      - MetricFire
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-pgsql'
  meta:
    <<: *meta
    name: PostgreSQL
    link: https://www.postgresql.org/
    icon_filename: 'postgres.svg'
    keywords:
      - export
      - PostgreSQL
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-wavefront'
  meta:
    <<: *meta
    name: Wavefront
    link: https://docs.wavefront.com/wavefront_data_ingestion.html
    icon_filename: 'wavefront.png'
    keywords:
      - export
      - Wavefront
      - prometheus
      - remote write
- <<: *promexport
  id: 'export-timescaledb'
  meta:
    <<: *meta
    name: TimescaleDB
    link: https://www.timescale.com/
    icon_filename: 'timescale.png'
    keywords:
      - export
      - TimescaleDB
      - prometheus
      - remote write