summaryrefslogtreecommitdiffstats
path: root/src/fluent-bit/plugins/out_opensearch
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-05 12:08:03 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-05 12:08:18 +0000
commit5da14042f70711ea5cf66e034699730335462f66 (patch)
tree0f6354ccac934ed87a2d555f45be4c831cf92f4a /src/fluent-bit/plugins/out_opensearch
parentReleasing debian version 1.44.3-2. (diff)
downloadnetdata-5da14042f70711ea5cf66e034699730335462f66.tar.xz
netdata-5da14042f70711ea5cf66e034699730335462f66.zip
Merging upstream version 1.45.3+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/fluent-bit/plugins/out_opensearch')
-rw-r--r--src/fluent-bit/plugins/out_opensearch/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_opensearch/opensearch.c1291
-rw-r--r--src/fluent-bit/plugins/out_opensearch/opensearch.h155
-rw-r--r--src/fluent-bit/plugins/out_opensearch/os_conf.c411
-rw-r--r--src/fluent-bit/plugins/out_opensearch/os_conf.h33
5 files changed, 1896 insertions, 0 deletions
diff --git a/src/fluent-bit/plugins/out_opensearch/CMakeLists.txt b/src/fluent-bit/plugins/out_opensearch/CMakeLists.txt
new file mode 100644
index 000000000..0e2bf59fe
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opensearch/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ os_conf.c
+ opensearch.c
+ )
+
+FLB_PLUGIN(out_opensearch "${src}" "")
diff --git a/src/fluent-bit/plugins/out_opensearch/opensearch.c b/src/fluent-bit/plugins/out_opensearch/opensearch.c
new file mode 100644
index 000000000..dbd0fa8d0
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opensearch/opensearch.c
@@ -0,0 +1,1291 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_signv4.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <msgpack.h>
+
+#include <cfl/cfl.h>
+
+#include "opensearch.h"
+#include "os_conf.h"
+
+static int os_pack_array_content(msgpack_packer *tmp_pck,
+ msgpack_object array,
+ struct flb_opensearch *ctx);
+
+#ifdef FLB_HAVE_AWS
+static flb_sds_t add_aws_auth(struct flb_http_client *c,
+ struct flb_opensearch *ctx)
+{
+ flb_sds_t signature = NULL;
+ int ret;
+
+ flb_plg_debug(ctx->ins, "Signing request with AWS Sigv4");
+
+ /* Amazon OpenSearch Sigv4 does not allow the host header to include the port */
+ ret = flb_http_strip_port_from_host(c);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "could not strip port from host for sigv4");
+ return NULL;
+ }
+
+ /* AWS Fluent Bit user agent */
+ flb_http_add_header(c, "User-Agent", 10, "aws-fluent-bit-plugin", 21);
+
+ signature = flb_signv4_do(c, FLB_TRUE, FLB_TRUE, time(NULL),
+ ctx->aws_region, ctx->aws_service_name,
+ S3_MODE_SIGNED_PAYLOAD, ctx->aws_unsigned_headers,
+ ctx->aws_provider);
+ if (!signature) {
+ flb_plg_error(ctx->ins, "could not sign request with sigv4");
+ return NULL;
+ }
+ return signature;
+}
+#endif /* FLB_HAVE_AWS */
+
+static int os_pack_map_content(msgpack_packer *tmp_pck,
+ msgpack_object map,
+ struct flb_opensearch *ctx)
+{
+ int i;
+ char *ptr_key = NULL;
+ char buf_key[256];
+ msgpack_object *k;
+ msgpack_object *v;
+
+ for (i = 0; i < map.via.map.size; i++) {
+ k = &map.via.map.ptr[i].key;
+ v = &map.via.map.ptr[i].val;
+ ptr_key = NULL;
+
+ /* Store key */
+ const char *key_ptr = NULL;
+ size_t key_size = 0;
+
+ if (k->type == MSGPACK_OBJECT_BIN) {
+ key_ptr = k->via.bin.ptr;
+ key_size = k->via.bin.size;
+ }
+ else if (k->type == MSGPACK_OBJECT_STR) {
+ key_ptr = k->via.str.ptr;
+ key_size = k->via.str.size;
+ }
+
+ if (key_size < (sizeof(buf_key) - 1)) {
+ memcpy(buf_key, key_ptr, key_size);
+ buf_key[key_size] = '\0';
+ ptr_key = buf_key;
+ }
+ else {
+ /* Long map keys have a performance penalty */
+ ptr_key = flb_malloc(key_size + 1);
+ if (!ptr_key) {
+ flb_errno();
+ return -1;
+ }
+
+ memcpy(ptr_key, key_ptr, key_size);
+ ptr_key[key_size] = '\0';
+ }
+
+ /*
+ * Sanitize key name, it don't allow dots in field names:
+ *
+ * https://goo.gl/R5NMTr
+ */
+ if (ctx->replace_dots == FLB_TRUE) {
+ char *p = ptr_key;
+ char *end = ptr_key + key_size;
+ while (p != end) {
+ if (*p == '.') *p = '_';
+ p++;
+ }
+ }
+
+ /* Append the key */
+ msgpack_pack_str(tmp_pck, key_size);
+ msgpack_pack_str_body(tmp_pck, ptr_key, key_size);
+
+ /* Release temporary key if was allocated */
+ if (ptr_key && ptr_key != buf_key) {
+ flb_free(ptr_key);
+ }
+ ptr_key = NULL;
+
+ /*
+ * The value can be any data type, if it's a map we need to
+ * sanitize to avoid dots.
+ */
+ if (v->type == MSGPACK_OBJECT_MAP) {
+ msgpack_pack_map(tmp_pck, v->via.map.size);
+ os_pack_map_content(tmp_pck, *v, ctx);
+ }
+ /*
+ * The value can be any data type, if it's an array we need to
+ * pass it to os_pack_array_content.
+ */
+ else if (v->type == MSGPACK_OBJECT_ARRAY) {
+ msgpack_pack_array(tmp_pck, v->via.array.size);
+ os_pack_array_content(tmp_pck, *v, ctx);
+ }
+ else {
+ msgpack_pack_object(tmp_pck, *v);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Iterate through the array and sanitize elements.
+ * Mutual recursion with os_pack_map_content.
+ */
+static int os_pack_array_content(msgpack_packer *tmp_pck,
+ msgpack_object array,
+ struct flb_opensearch *ctx)
+{
+ int i;
+ msgpack_object *e;
+
+ for (i = 0; i < array.via.array.size; i++) {
+ e = &array.via.array.ptr[i];
+ if (e->type == MSGPACK_OBJECT_MAP) {
+ msgpack_pack_map(tmp_pck, e->via.map.size);
+ os_pack_map_content(tmp_pck, *e, ctx);
+ }
+ else if (e->type == MSGPACK_OBJECT_ARRAY) {
+ msgpack_pack_array(tmp_pck, e->via.array.size);
+ os_pack_array_content(tmp_pck, *e, ctx);
+ }
+ else {
+ msgpack_pack_object(tmp_pck, *e);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Get _id value from incoming record.
+ * If it successed, return the value as flb_sds_t.
+ * If it failed, return NULL.
+*/
+static flb_sds_t os_get_id_value(struct flb_opensearch *ctx,
+ msgpack_object *map)
+{
+ struct flb_ra_value *rval = NULL;
+ flb_sds_t tmp_str;
+ rval = flb_ra_get_value_object(ctx->ra_id_key, *map);
+ if (rval == NULL) {
+ flb_plg_warn(ctx->ins, "the value of %s is missing",
+ ctx->id_key);
+ return NULL;
+ }
+ else if(rval->o.type != MSGPACK_OBJECT_STR) {
+ flb_plg_warn(ctx->ins, "the value of %s is not string",
+ ctx->id_key);
+ flb_ra_key_value_destroy(rval);
+ return NULL;
+ }
+
+ tmp_str = flb_sds_create_len(rval->o.via.str.ptr,
+ rval->o.via.str.size);
+ if (tmp_str == NULL) {
+ flb_plg_warn(ctx->ins, "cannot create ID string from record");
+ flb_ra_key_value_destroy(rval);
+ return NULL;
+ }
+ flb_ra_key_value_destroy(rval);
+ return tmp_str;
+}
+
+static int compose_index_header(struct flb_opensearch *ctx,
+ int index_custom_len,
+ char *logstash_index, size_t logstash_index_size,
+ char *separator_str,
+ struct tm *tm)
+{
+ int ret;
+ int len;
+ char *p;
+ size_t s;
+
+ /* Compose Index header */
+ if (index_custom_len > 0) {
+ p = logstash_index + index_custom_len;
+ } else {
+ p = logstash_index + flb_sds_len(ctx->logstash_prefix);
+ }
+ len = p - logstash_index;
+ ret = snprintf(p, logstash_index_size - len, "%s",
+ separator_str);
+ if (ret > logstash_index_size - len) {
+ /* exceed limit */
+ return -1;
+ }
+ p += strlen(separator_str);
+ len += strlen(separator_str);
+
+ s = strftime(p, logstash_index_size - len,
+ ctx->logstash_dateformat, tm);
+ if (s==0) {
+ /* exceed limit */
+ return -1;
+ }
+ p += s;
+ *p++ = '\0';
+
+ return 0;
+}
+
+/*
+ * Convert the internal Fluent Bit data representation to the required
+ * one by OpenSearch.
+ */
+static int opensearch_format(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *plugin_context,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_data, size_t *out_size)
+{
+ int ret;
+ int len;
+ int map_size;
+ int index_len = 0;
+ int write_op_update = FLB_FALSE;
+ int write_op_upsert = FLB_FALSE;
+ flb_sds_t ra_index = NULL;
+ size_t s = 0;
+ char *index = NULL;
+ char logstash_index[256];
+ char time_formatted[256];
+ char index_formatted[256];
+ char uuid[37];
+ flb_sds_t out_buf;
+ flb_sds_t id_key_str = NULL;
+ msgpack_object map;
+ flb_sds_t bulk;
+ struct tm tm;
+ struct flb_time tms;
+ msgpack_sbuffer tmp_sbuf;
+ msgpack_packer tmp_pck;
+ cfl_hash_128bits_t hash;
+ unsigned char h[sizeof(cfl_hash_128bits_t)];
+ int index_custom_len;
+ struct flb_opensearch *ctx = plugin_context;
+ flb_sds_t j_index;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ j_index = flb_sds_create_size(FLB_OS_HEADER_SIZE);
+ if (j_index == NULL) {
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return -1;
+ }
+
+ bulk = flb_sds_create_size(bytes * 2);
+ if (!bulk) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(j_index);
+
+ return -1;
+ }
+
+ /* Copy logstash prefix if logstash format is enabled */
+ if (ctx->logstash_format == FLB_TRUE) {
+ strncpy(logstash_index, ctx->logstash_prefix, sizeof(logstash_index));
+ logstash_index[sizeof(logstash_index) - 1] = '\0';
+ }
+
+ /*
+ * If logstash format and id generation are disabled, pre-generate
+ * the index line for all records.
+ *
+ * The header stored in 'j_index' will be used for the all records on
+ * this payload.
+ */
+ if (ctx->logstash_format == FLB_FALSE && ctx->generate_id == FLB_FALSE && ctx->ra_index == NULL) {
+ flb_time_get(&tms);
+ gmtime_r(&tms.tm.tv_sec, &tm);
+ strftime(index_formatted, sizeof(index_formatted) - 1,
+ ctx->index, &tm);
+ index = index_formatted;
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_NO_TYPE,
+ ctx->action,
+ index);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT,
+ ctx->action,
+ index, ctx->type);
+ }
+
+ if (index_len == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(bulk);
+ flb_sds_destroy(j_index);
+ return -1;
+ }
+ }
+
+ /*
+ * Some broken clients may have time drift up to year 1970
+ * this will generate corresponding index in OpenSearch
+ * in order to prevent generating millions of indexes
+ * we can set to always use current time for index generation
+ */
+ if (ctx->current_time_index == FLB_TRUE) {
+ flb_time_get(&tms);
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ /* Only pop time from record if current_time_index is disabled */
+ if (!ctx->current_time_index) {
+ flb_time_copy(&tms, &log_event.timestamp);
+ }
+
+ map = *log_event.body;
+ map_size = map.via.map.size;
+
+ index_custom_len = 0;
+ if (ctx->logstash_prefix_key) {
+ flb_sds_t v = flb_ra_translate(ctx->ra_prefix_key,
+ (char *) tag, tag_len,
+ map, NULL);
+ if (v) {
+ len = flb_sds_len(v);
+ if (len > 128) {
+ len = 128;
+ memcpy(logstash_index, v, 128);
+ }
+ else {
+ memcpy(logstash_index, v, len);
+ }
+
+ index_custom_len = len;
+ flb_sds_destroy(v);
+ }
+ }
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&tmp_sbuf);
+ msgpack_packer_init(&tmp_pck, &tmp_sbuf, msgpack_sbuffer_write);
+
+ if (ctx->include_tag_key) {
+ map_size++;
+ }
+
+ /* Set the new map size */
+ msgpack_pack_map(&tmp_pck, map_size + 1);
+
+ /* Append the time key */
+ msgpack_pack_str(&tmp_pck, flb_sds_len(ctx->time_key));
+ msgpack_pack_str_body(&tmp_pck, ctx->time_key, flb_sds_len(ctx->time_key));
+
+ /* Format the time */
+ gmtime_r(&tms.tm.tv_sec, &tm);
+ s = strftime(time_formatted, sizeof(time_formatted) - 1,
+ ctx->time_key_format, &tm);
+ if (ctx->time_key_nanos) {
+ len = snprintf(time_formatted + s, sizeof(time_formatted) - 1 - s,
+ ".%09" PRIu64 "Z", (uint64_t) tms.tm.tv_nsec);
+ } else {
+ len = snprintf(time_formatted + s, sizeof(time_formatted) - 1 - s,
+ ".%03" PRIu64 "Z",
+ (uint64_t) tms.tm.tv_nsec / 1000000);
+ }
+
+ s += len;
+ msgpack_pack_str(&tmp_pck, s);
+ msgpack_pack_str_body(&tmp_pck, time_formatted, s);
+
+ index = ctx->index;
+ if (ctx->logstash_format == FLB_TRUE) {
+ ret = compose_index_header(ctx, index_custom_len,
+ &logstash_index[0], sizeof(logstash_index),
+ ctx->logstash_prefix_separator, &tm);
+ if (ret < 0) {
+ /* retry with default separator */
+ compose_index_header(ctx, index_custom_len,
+ &logstash_index[0], sizeof(logstash_index),
+ "-", &tm);
+ }
+ index = logstash_index;
+ if (ctx->generate_id == FLB_FALSE) {
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_NO_TYPE,
+ ctx->action,
+ index);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT,
+ ctx->action,
+ index, ctx->type);
+ }
+ }
+ }
+ else if (ctx->current_time_index == FLB_TRUE) {
+ /* Make sure we handle index time format for index */
+ strftime(index_formatted, sizeof(index_formatted) - 1,
+ ctx->index, &tm);
+ index = index_formatted;
+ }
+ else if (ctx->ra_index) {
+ // free any previous ra_index to avoid memory leaks.
+ if (ra_index != NULL) {
+ flb_sds_destroy(ra_index);
+ }
+ /* a record accessor pattern exists for the index */
+ ra_index = flb_ra_translate(ctx->ra_index,
+ (char *) tag, tag_len,
+ map, NULL);
+ if (!ra_index) {
+ flb_plg_warn(ctx->ins, "invalid index translation from record accessor pattern, default to static index");
+ }
+ else {
+ index = ra_index;
+ }
+
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_NO_TYPE,
+ ctx->action,
+ index);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT,
+ ctx->action,
+ index, ctx->type);
+ }
+ }
+
+ /* Tag Key */
+ if (ctx->include_tag_key == FLB_TRUE) {
+ msgpack_pack_str(&tmp_pck, flb_sds_len(ctx->tag_key));
+ msgpack_pack_str_body(&tmp_pck, ctx->tag_key, flb_sds_len(ctx->tag_key));
+ msgpack_pack_str(&tmp_pck, tag_len);
+ msgpack_pack_str_body(&tmp_pck, tag, tag_len);
+ }
+
+ /*
+ * The map_content routine iterate over each Key/Value pair found in
+ * the map and do some sanitization for the key names.
+ *
+ * There is a restriction that key names cannot contain a dot; if some
+ * dot is found, it's replaced with an underscore.
+ */
+ ret = os_pack_map_content(&tmp_pck, map, ctx);
+ if (ret == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ flb_sds_destroy(bulk);
+ flb_sds_destroy(j_index);
+ if (ra_index != NULL) {
+ flb_sds_destroy(ra_index);
+ }
+ return -1;
+ }
+
+ if (ctx->generate_id == FLB_TRUE) {
+ /* use a 128 bit hash and copy it to a buffer */
+ hash = cfl_hash_128bits(tmp_sbuf.data, tmp_sbuf.size);
+ memcpy(h, &hash, sizeof(hash));
+ snprintf(uuid, sizeof(uuid),
+ "%02X%02X%02X%02X-%02X%02X-%02X%02X-"
+ "%02X%02X-%02X%02X%02X%02X%02X%02X",
+ h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7],
+ h[8], h[9], h[10], h[11], h[12], h[13], h[14], h[15]);
+
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_ID_NO_TYPE,
+ ctx->action,
+ index, uuid);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_ID,
+ ctx->action,
+ index, ctx->type, uuid);
+ }
+ }
+ if (ctx->ra_id_key) {
+ id_key_str = os_get_id_value(ctx ,&map);
+ if (id_key_str) {
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_ID_NO_TYPE,
+ ctx->action,
+ index, id_key_str);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_ID,
+ ctx->action,
+ index, ctx->type, id_key_str);
+ }
+ flb_sds_destroy(id_key_str);
+ id_key_str = NULL;
+ }
+ }
+
+ /* Convert msgpack to JSON */
+ out_buf = flb_msgpack_raw_to_json_sds(tmp_sbuf.data, tmp_sbuf.size);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ if (!out_buf) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(bulk);
+ flb_sds_destroy(j_index);
+ if (ra_index != NULL) {
+ flb_sds_destroy(ra_index);
+ }
+ return -1;
+ }
+
+ ret = flb_sds_cat_safe(&bulk, j_index, flb_sds_len(j_index));
+ if (ret == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ *out_size = 0;
+ flb_sds_destroy(bulk);
+ flb_sds_destroy(j_index);
+ flb_sds_destroy(out_buf);
+ if (ra_index != NULL) {
+ flb_sds_destroy(ra_index);
+ }
+ return -1;
+ }
+
+ if (strcasecmp(ctx->write_operation, FLB_OS_WRITE_OP_UPDATE) == 0) {
+ write_op_update = FLB_TRUE;
+ }
+ else if (strcasecmp(ctx->write_operation, FLB_OS_WRITE_OP_UPSERT) == 0) {
+ write_op_upsert = FLB_TRUE;
+ }
+
+ /* UPDATE | UPSERT */
+ if (write_op_update) {
+ flb_sds_cat_safe(&bulk,
+ OS_BULK_UPDATE_OP_BODY,
+ sizeof(OS_BULK_UPDATE_OP_BODY) - 1);
+ }
+ else if (write_op_upsert) {
+ flb_sds_cat_safe(&bulk,
+ OS_BULK_UPSERT_OP_BODY,
+ sizeof(OS_BULK_UPSERT_OP_BODY) - 1);
+ }
+
+ ret = flb_sds_cat_safe(&bulk, out_buf, flb_sds_len(out_buf));
+ if (ret == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ *out_size = 0;
+ flb_sds_destroy(bulk);
+ flb_sds_destroy(j_index);
+ flb_sds_destroy(out_buf);
+ if (ra_index != NULL) {
+ flb_sds_destroy(ra_index);
+ }
+ return -1;
+ }
+
+ /* finish UPDATE | UPSERT */
+ if (write_op_update || write_op_upsert) {
+ flb_sds_cat_safe(&bulk, "}", 1);
+ }
+
+ flb_sds_cat_safe(&bulk, "\n", 1);
+ flb_sds_destroy(out_buf);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ /* Set outgoing data */
+ *out_data = bulk;
+ *out_size = flb_sds_len(bulk);
+
+ if (ra_index != NULL) {
+ flb_sds_destroy(ra_index);
+ }
+ /*
+ * Note: we don't destroy the bulk as we need to keep the allocated
+ * buffer with the data. Instead we just release the bulk context and
+ * return the bulk->ptr buffer
+ */
+ if (ctx->trace_output) {
+ fwrite(*out_data, 1, *out_size, stdout);
+ fflush(stdout);
+ }
+ flb_sds_destroy(j_index);
+ return 0;
+}
+
+static int cb_opensearch_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct flb_opensearch *ctx;
+
+ ctx = flb_os_conf_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "cannot initialize plugin");
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "host=%s port=%i uri=%s index=%s type=%s",
+ ins->host.name, ins->host.port, ctx->uri,
+ ctx->index, ctx->type);
+
+ flb_output_set_context(ins, ctx);
+
+ /*
+ * This plugin instance uses the HTTP client interface, let's register
+ * it debugging callbacks.
+ */
+ flb_output_set_http_debug_callbacks(ins);
+
+ return 0;
+}
+
+static int opensearch_error_check(struct flb_opensearch *ctx,
+ struct flb_http_client *c)
+{
+ int i, j, k;
+ int ret;
+ int check = FLB_FALSE;
+ int root_type;
+ char *out_buf;
+ size_t off = 0;
+ size_t out_size;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object key;
+ msgpack_object val;
+ msgpack_object item;
+ msgpack_object item_key;
+ msgpack_object item_val;
+
+ /*
+ * Check if our payload is complete: there is such situations where
+ * the OpenSearch HTTP response body is bigger than the HTTP client
+ * buffer so payload can be incomplete.
+ */
+ /* Convert JSON payload to msgpack */
+ ret = flb_pack_json(c->resp.payload, c->resp.payload_size,
+ &out_buf, &out_size, &root_type, NULL);
+ if (ret == -1) {
+ /* Is this an incomplete HTTP Request ? */
+ if (c->resp.payload_size <= 0) {
+ return FLB_TRUE;
+ }
+
+ /* Lookup error field */
+ if (strstr(c->resp.payload, "\"errors\":false,\"items\":[")) {
+ return FLB_FALSE;
+ }
+
+ flb_plg_error(ctx->ins, "could not pack/validate JSON response\n%s",
+ c->resp.payload);
+ return FLB_TRUE;
+ }
+
+ /* Lookup error field */
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, out_buf, out_size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ flb_plg_error(ctx->ins, "Cannot unpack response to find error\n%s",
+ c->resp.payload);
+ return FLB_TRUE;
+ }
+
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected payload type=%i",
+ root.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ for (i = 0; i < root.via.map.size; i++) {
+ key = root.via.map.ptr[i].key;
+ if (key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "unexpected key type=%i",
+ key.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ if (key.via.str.size == 6 && strncmp(key.via.str.ptr, "errors", 6) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_BOOLEAN) {
+ flb_plg_error(ctx->ins, "unexpected 'error' value type=%i",
+ val.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ /* If error == false, we are OK (no errors = FLB_FALSE) */
+ if (!val.via.boolean) {
+ /* no errors */
+ check = FLB_FALSE;
+ goto done;
+ }
+ }
+ else if (key.via.str.size == 5 && strncmp(key.via.str.ptr, "items", 5) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_ARRAY) {
+ flb_plg_error(ctx->ins, "unexpected 'items' value type=%i",
+ val.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ for (j = 0; j < val.via.array.size; j++) {
+ item = val.via.array.ptr[j];
+ if (item.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected 'item' outer value type=%i",
+ item.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ if (item.via.map.size != 1) {
+ flb_plg_error(ctx->ins, "unexpected 'item' size=%i",
+ item.via.map.size);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ item = item.via.map.ptr[0].val;
+ if (item.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected 'item' inner value type=%i",
+ item.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ for (k = 0; k < item.via.map.size; k++) {
+ item_key = item.via.map.ptr[k].key;
+ if (item_key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "unexpected key type=%i",
+ item_key.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ if (item_key.via.str.size == 6 && strncmp(item_key.via.str.ptr, "status", 6) == 0) {
+ item_val = item.via.map.ptr[k].val;
+
+ if (item_val.type != MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ flb_plg_error(ctx->ins, "unexpected 'status' value type=%i",
+ item_val.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+ /* Check for errors other than version conflict (document already exists) */
+ if (item_val.via.i64 != 409) {
+ check = FLB_TRUE;
+ goto done;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ done:
+ flb_free(out_buf);
+ msgpack_unpacked_destroy(&result);
+ return check;
+}
+
+static void cb_opensearch_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins, void *out_context,
+ struct flb_config *config)
+{
+ int ret = -1;
+ size_t pack_size;
+ flb_sds_t pack;
+ void *out_buf;
+ size_t out_size;
+ size_t b_sent;
+ struct flb_opensearch *ctx = out_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ flb_sds_t signature = NULL;
+ int compressed = FLB_FALSE;
+ void *final_payload_buf = NULL;
+ size_t final_payload_size = 0;
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Convert format */
+ if (event_chunk->type == FLB_EVENT_TYPE_TRACES) {
+ pack = flb_msgpack_raw_to_json_sds(event_chunk->data, event_chunk->size);
+ if (pack) {
+ ret = 0;
+ }
+ else {
+ ret = -1;
+ }
+ }
+ else if (event_chunk->type == FLB_EVENT_TYPE_LOGS) {
+ ret = opensearch_format(config, ins,
+ ctx, NULL,
+ event_chunk->type,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ event_chunk->data, event_chunk->size,
+ &out_buf, &out_size);
+ }
+
+ if (ret != 0) {
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ pack = (char *) out_buf;
+ pack_size = out_size;
+
+ final_payload_buf = pack;
+ final_payload_size = pack_size;
+ /* Should we compress the payload ? */
+ if (ctx->compression == FLB_OS_COMPRESSION_GZIP) {
+ ret = flb_gzip_compress((void *) pack, pack_size,
+ &out_buf, &out_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "cannot gzip payload, disabling compression");
+ }
+ else {
+ compressed = FLB_TRUE;
+ final_payload_buf = out_buf;
+ final_payload_size = out_size;
+ }
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->uri,
+ final_payload_buf, final_payload_size, NULL, 0, NULL, 0);
+
+ flb_http_buffer_size(c, ctx->buffer_size);
+
+#ifndef FLB_HAVE_AWS
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+#endif
+
+ flb_http_add_header(c, "Content-Type", 12, "application/x-ndjson", 20);
+
+ if (ctx->http_user && ctx->http_passwd) {
+ flb_http_basic_auth(c, ctx->http_user, ctx->http_passwd);
+ }
+
+#ifdef FLB_HAVE_AWS
+ if (ctx->has_aws_auth == FLB_TRUE) {
+ signature = add_aws_auth(c, ctx);
+ if (!signature) {
+ goto retry;
+ }
+ }
+ else {
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ }
+#endif
+
+ /* Set Content-Encoding of compressed payload */
+ if (compressed == FLB_TRUE) {
+ if (ctx->compression == FLB_OS_COMPRESSION_GZIP) {
+ flb_http_set_content_encoding_gzip(c);
+ }
+ }
+
+ /* Map debug callbacks */
+ flb_http_client_debug(c, ctx->ins->callback);
+
+ ret = flb_http_do(c, &b_sent);
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i URI=%s", ret, ctx->uri);
+ goto retry;
+ }
+ else {
+ /* The request was issued successfully, validate the 'error' field */
+ flb_plg_debug(ctx->ins, "HTTP Status=%i URI=%s", c->resp.status, ctx->uri);
+ if (c->resp.status != 200 && c->resp.status != 201) {
+ if (c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "HTTP status=%i URI=%s, response:\n%s\n",
+ c->resp.status, ctx->uri, c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "HTTP status=%i URI=%s",
+ c->resp.status, ctx->uri);
+ }
+ goto retry;
+ }
+
+ if (c->resp.payload_size > 0) {
+ /*
+ * OpenSearch payload should be JSON, we convert it to msgpack
+ * and lookup the 'error' field.
+ */
+ ret = opensearch_error_check(ctx, c);
+ if (ret == FLB_TRUE) {
+ /* we got an error */
+ if (ctx->trace_error) {
+ /*
+ * If trace_error is set, trace the actual
+ * response from Elasticsearch explaining the problem.
+ * Trace_Output can be used to see the request.
+ */
+ if (pack_size < 4000) {
+ flb_plg_debug(ctx->ins, "error caused by: Input\n%.*s\n",
+ (int) pack_size, pack);
+ }
+ if (c->resp.payload_size < 4000) {
+ flb_plg_error(ctx->ins, "error: Output\n%s",
+ c->resp.payload);
+ } else {
+ /*
+ * We must use fwrite since the flb_log functions
+ * will truncate data at 4KB
+ */
+ fwrite(c->resp.payload, 1, c->resp.payload_size, stderr);
+ fflush(stderr);
+ }
+ }
+ goto retry;
+ }
+ else {
+ flb_plg_debug(ctx->ins, "OpenSearch response\n%s",
+ c->resp.payload);
+ }
+ }
+ else {
+ goto retry;
+ }
+ }
+
+ /* Cleanup */
+ flb_http_client_destroy(c);
+ flb_sds_destroy(pack);
+
+ if (final_payload_buf != pack) {
+ flb_free(final_payload_buf);
+ }
+
+ flb_upstream_conn_release(u_conn);
+ if (signature) {
+ flb_sds_destroy(signature);
+ }
+ FLB_OUTPUT_RETURN(FLB_OK);
+
+ /* Issue a retry */
+ retry:
+ flb_http_client_destroy(c);
+ flb_sds_destroy(pack);
+
+ if (final_payload_buf != pack) {
+ flb_free(final_payload_buf);
+ }
+
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+}
+
/*
 * Plugin shutdown callback: release the plugin context.
 * Guards against a NULL context (e.g. if init failed before the
 * context was attached).
 */
static int cb_opensearch_exit(void *data, struct flb_config *config)
{
    struct flb_opensearch *ctx = data;

    if (ctx == NULL) {
        return 0;
    }

    flb_os_conf_destroy(ctx);
    return 0;
}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "index", FLB_OS_DEFAULT_INDEX,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, index),
+ "Set an index name"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "type", FLB_OS_DEFAULT_TYPE,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, type),
+ "Set the document type property"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "suppress_type_name", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, suppress_type_name),
+ "If true, mapping types is removed. (for v7.0.0 or later)"
+ },
+
+ /* HTTP Authentication */
+ {
+ FLB_CONFIG_MAP_STR, "http_user", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, http_user),
+ "Optional username credential for access"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "http_passwd", "",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, http_passwd),
+ "Password for user defined in 'http_user'"
+ },
+
+ /* AWS Authentication */
+#ifdef FLB_HAVE_AWS
+ {
+ FLB_CONFIG_MAP_BOOL, "aws_auth", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, has_aws_auth),
+ "Enable AWS Sigv4 Authentication"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_region", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, aws_region),
+ "AWS Region of your Amazon OpenSearch Service cluster"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_profile", "default",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, aws_profile),
+ "AWS Profile name. AWS Profiles can be configured with AWS CLI and are usually stored in "
+ "$HOME/.aws/ directory."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_sts_endpoint", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, aws_sts_endpoint),
+ "Custom endpoint for the AWS STS API, used with the AWS_Role_ARN option"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_role_arn", NULL,
+ 0, FLB_FALSE, 0,
+ "AWS IAM Role to assume to put records to your Amazon OpenSearch cluster"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_external_id", NULL,
+ 0, FLB_FALSE, 0,
+ "External ID for the AWS IAM Role specified with `aws_role_arn`"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_service_name", "es",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, aws_service_name),
+ "AWS Service Name"
+ },
+#endif
+
+ /* Logstash compatibility */
+ {
+ FLB_CONFIG_MAP_BOOL, "logstash_format", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, logstash_format),
+ "Enable Logstash format compatibility"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logstash_prefix", FLB_OS_DEFAULT_PREFIX,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, logstash_prefix),
+ "When Logstash_Format is enabled, the Index name is composed using a prefix "
+ "and the date, e.g: If Logstash_Prefix is equals to 'mydata' your index will "
+ "become 'mydata-YYYY.MM.DD'. The last string appended belongs to the date "
+ "when the data is being generated"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logstash_prefix_separator", "-",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, logstash_prefix_separator),
+ "Set a separator between logstash_prefix and date."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logstash_prefix_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, logstash_prefix_key),
+ "When included: the value in the record that belongs to the key will be looked "
+ "up and over-write the Logstash_Prefix for index generation. If the key/value "
+ "is not found in the record then the Logstash_Prefix option will act as a "
+ "fallback. Nested keys are supported through record accessor pattern"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logstash_dateformat", FLB_OS_DEFAULT_TIME_FMT,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, logstash_dateformat),
+ "Time format (based on strftime) to generate the second part of the Index name"
+ },
+
+ /* Custom Time and Tag keys */
+ {
+ FLB_CONFIG_MAP_STR, "time_key", FLB_OS_DEFAULT_TIME_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, time_key),
+ "When Logstash_Format is enabled, each record will get a new timestamp field. "
+ "The Time_Key property defines the name of that field"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "time_key_format", FLB_OS_DEFAULT_TIME_KEYF,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, time_key_format),
+ "When Logstash_Format is enabled, this property defines the format of the "
+ "timestamp"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "time_key_nanos", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, time_key_nanos),
+ "When Logstash_Format is enabled, enabling this property sends nanosecond "
+ "precision timestamps"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "include_tag_key", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, include_tag_key),
+ "When enabled, it append the Tag name to the record"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tag_key", FLB_OS_DEFAULT_TAG_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, tag_key),
+ "When Include_Tag_Key is enabled, this property defines the key name for the tag"
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_size", FLB_OS_DEFAULT_HTTP_MAX,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, buffer_size),
+ "Specify the buffer size used to read the response from the OpenSearch HTTP "
+ "service. This option is useful for debugging purposes where is required to read "
+ "full responses, note that response size grows depending of the number of records "
+ "inserted. To set an unlimited amount of memory set this value to 'false', "
+ "otherwise the value must be according to the Unit Size specification"
+ },
+
+ /* OpenSearch specifics */
+ {
+ FLB_CONFIG_MAP_STR, "path", NULL,
+ 0, FLB_FALSE, 0,
+ "OpenSearch accepts new data on HTTP query path '/_bulk'. But it is also "
+ "possible to serve OpenSearch behind a reverse proxy on a subpath. This "
+ "option defines such path on the fluent-bit side. It simply adds a path "
+ "prefix in the indexing HTTP POST URI"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "pipeline", NULL,
+ 0, FLB_FALSE, 0,
+ "OpenSearch allows to setup filters called pipelines. "
+ "This option allows to define which pipeline the database should use. For "
+ "performance reasons is strongly suggested to do parsing and filtering on "
+ "Fluent Bit side, avoid pipelines"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "generate_id", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, generate_id),
+ "When enabled, generate _id for outgoing records. This prevents duplicate "
+ "records when retrying"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "write_operation", "create",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, write_operation),
+ "Operation to use to write in bulk requests"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "id_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, id_key),
+ "If set, _id will be the value of the key from incoming record."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "replace_dots", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, replace_dots),
+ "When enabled, replace field name dots with underscore."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "current_time_index", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, current_time_index),
+ "Use current time for index generation instead of message record"
+ },
+
+ /* Trace */
+ {
+ FLB_CONFIG_MAP_BOOL, "trace_output", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, trace_output),
+ "When enabled print the OpenSearch API calls to stdout (for diag only)"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "trace_error", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, trace_error),
+ "When enabled print the OpenSearch exception to stderr (for diag only)"
+ },
+
+ /* HTTP Compression */
+ {
+ FLB_CONFIG_MAP_STR, "compress", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, compression_str),
+ "Set payload compression mechanism. Option available is 'gzip'"
+ },
+
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_output_plugin out_opensearch_plugin = {
+ .name = "opensearch",
+ .description = "OpenSearch",
+ .cb_init = cb_opensearch_init,
+ .cb_pre_run = NULL,
+ .cb_flush = cb_opensearch_flush,
+ .cb_exit = cb_opensearch_exit,
+
+ /* Configuration */
+ .config_map = config_map,
+
+ /* Events supported */
+ .event_type = FLB_OUTPUT_LOGS | FLB_OUTPUT_TRACES,
+
+ /* Test */
+ .test_formatter.callback = opensearch_format,
+
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_opensearch/opensearch.h b/src/fluent-bit/plugins/out_opensearch/opensearch.h
new file mode 100644
index 000000000..a1087c1da
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opensearch/opensearch.h
@@ -0,0 +1,155 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_OPENSEARCH_H
+#define FLB_OUT_OPENSEARCH_H
+
/* config defaults */
#define FLB_OS_DEFAULT_HOST       "127.0.0.1"
/*
 * Fix: the previous value (92000) exceeds the maximum valid TCP port
 * (65535). OpenSearch serves its REST API on 9200 by default, which also
 * matches the literal default passed to flb_output_net_default() in
 * flb_os_conf_create().
 */
#define FLB_OS_DEFAULT_PORT       9200
#define FLB_OS_DEFAULT_INDEX      "fluent-bit"
#define FLB_OS_DEFAULT_TYPE       "_doc"
#define FLB_OS_DEFAULT_PREFIX     "logstash"
#define FLB_OS_DEFAULT_TIME_FMT   "%Y.%m.%d"
#define FLB_OS_DEFAULT_TIME_KEY   "@timestamp"
#define FLB_OS_DEFAULT_TIME_KEYF  "%Y-%m-%dT%H:%M:%S"
#define FLB_OS_DEFAULT_TAG_KEY    "flb-key"
#define FLB_OS_DEFAULT_HTTP_MAX   "512k"
/* accepted values for the 'write_operation' property */
#define FLB_OS_WRITE_OP_INDEX     "index"
#define FLB_OS_WRITE_OP_CREATE    "create"
#define FLB_OS_WRITE_OP_UPDATE    "update"
#define FLB_OS_WRITE_OP_UPSERT    "upsert"

/* macros */
#define FLB_OS_HEADER_SIZE 1024
#define OS_BULK_CHUNK 4096 /* Size of buffer chunks */
#define OS_BULK_HEADER 165 /* Bulk API prefix line */

/* Bulk formats: metadata line preceding every document in a _bulk body */
#define OS_BULK_INDEX_FMT "{\"%s\":{\"_index\":\"%s\",\"_type\":\"%s\"}}\n"
#define OS_BULK_INDEX_FMT_ID "{\"%s\":{\"_index\":\"%s\",\"_type\":\"%s\",\"_id\":\"%s\"}}\n"
#define OS_BULK_INDEX_FMT_NO_TYPE "{\"%s\":{\"_index\":\"%s\"}}\n"
#define OS_BULK_INDEX_FMT_ID_NO_TYPE "{\"%s\":{\"_index\":\"%s\",\"_id\":\"%s\"}}\n"

/* Bulk write-type operations: wrappers for update/upsert payloads */
#define OS_BULK_UPDATE_OP_BODY "{\"doc\":"
#define OS_BULK_UPSERT_OP_BODY "{\"doc_as_upsert\":true,\"doc\":"

/* Supported compression algorithms */
#define FLB_OS_COMPRESSION_NONE 0
#define FLB_OS_COMPRESSION_GZIP 1
+
/*
 * Plugin context: one instance per configured OpenSearch output.
 * Created by flb_os_conf_create() and released by flb_os_conf_destroy().
 * Most fields are populated from the config map; offsetof() entries in
 * opensearch.c write directly into this layout.
 */
struct flb_opensearch {
    /* OpenSearch index (database) and type (table) */
    flb_sds_t index;
    /* set only when 'index' contains a '$' record accessor pattern */
    struct flb_record_accessor *ra_index;

    char *type;
    /* NOTE(review): presumably suppresses '_type' in bulk metadata lines
     * (see OS_BULK_INDEX_FMT_NO_TYPE) -- confirm usage in opensearch.c */
    char suppress_type_name;

    /* HTTP Auth */
    char *http_user;
    char *http_passwd;

    /* AWS Auth */
#ifdef FLB_HAVE_AWS
    int has_aws_auth;
    char *aws_region;
    char *aws_sts_endpoint;
    char *aws_profile;
    /* active provider: the chain provider, or the STS wrapper if a role
     * ARN was configured */
    struct flb_aws_provider *aws_provider;
    /* original chain provider, kept when wrapped by the STS provider */
    struct flb_aws_provider *base_aws_provider;
    /* tls instances can't be re-used; aws provider requires a separate one */
    struct flb_tls *aws_tls;
    /* one for the standard chain provider, one for sts assume role */
    struct flb_tls *aws_sts_tls;
    char *aws_session_name;
    char *aws_service_name;
    /* header names excluded from SigV4 signing (e.g. Content-Length) */
    struct mk_list *aws_unsigned_headers;
#endif

    /* HTTP Client Setup: response buffer size, 0 == unlimited */
    size_t buffer_size;

    /* If enabled, replace field name dots with underscore */
    int replace_dots;

    /* diagnostics: print API calls / exceptions to stdout / stderr */
    int trace_output;
    int trace_error;

    /*
     * Logstash compatibility options
     * ==============================
     */

    /* enabled/disabled */
    int logstash_format;
    int generate_id;
    int current_time_index;

    /* prefix */
    flb_sds_t logstash_prefix;
    flb_sds_t logstash_prefix_separator;

    /* prefix key */
    flb_sds_t logstash_prefix_key;

    /* date format */
    flb_sds_t logstash_dateformat;

    /* time key */
    flb_sds_t time_key;

    /* time key format */
    flb_sds_t time_key_format;

    /* time key nanoseconds */
    int time_key_nanos;

    /* write operation config value */
    flb_sds_t write_operation;

    /* write operation / action: points at one of the FLB_OS_WRITE_OP_*
     * string constants, resolved in flb_os_conf_create() */
    char *action;

    /* id_key */
    flb_sds_t id_key;
    struct flb_record_accessor *ra_id_key;

    /* include_tag_key */
    int include_tag_key;
    flb_sds_t tag_key;

    /* HTTP API: bulk endpoint URI, built once at configuration time */
    char uri[1024];

    /* accessor derived from logstash_prefix_key */
    struct flb_record_accessor *ra_prefix_key;

    /* Upstream connection to the backend server */
    struct flb_upstream *u;

    /* Plugin output instance reference */
    struct flb_output_instance *ins;

    /* Compression algorithm: FLB_OS_COMPRESSION_* resolved from
     * compression_str ('gzip' or none) */
    int compression;
    flb_sds_t compression_str;
};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_opensearch/os_conf.c b/src/fluent-bit/plugins/out_opensearch/os_conf.c
new file mode 100644
index 000000000..b814bd35f
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opensearch/os_conf.c
@@ -0,0 +1,411 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_signv4.h>
+#include <fluent-bit/flb_aws_credentials.h>
+
+#include "opensearch.h"
+#include "os_conf.h"
+
+struct flb_opensearch *flb_os_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int len;
+ int io_flags = 0;
+ ssize_t ret;
+ char *buf;
+ const char *tmp;
+ const char *path;
+#ifdef FLB_HAVE_AWS
+ char *aws_role_arn = NULL;
+ char *aws_external_id = NULL;
+ char *aws_session_name = NULL;
+#endif
+ struct flb_uri *uri = ins->host.uri;
+ struct flb_uri_field *f_index = NULL;
+ struct flb_uri_field *f_type = NULL;
+ struct flb_upstream *upstream;
+ struct flb_opensearch *ctx;
+
+ /* Allocate context */
+ ctx = flb_calloc(1, sizeof(struct flb_opensearch));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ /* only used if the config has been set from the command line */
+ if (uri) {
+ if (uri->count >= 2) {
+ f_index = flb_uri_get(uri, 0);
+ f_type = flb_uri_get(uri, 1);
+ }
+ }
+
+ /* Set default network configuration */
+ flb_output_net_default("127.0.0.1", 9200, ins);
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "configuration error");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* use TLS ? */
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ /* Prepare an upstream handler */
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags,
+ ins->tls);
+ if (!upstream) {
+ flb_plg_error(ctx->ins, "cannot create Upstream context");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->u = upstream;
+
+ /* Set instance flags into upstream */
+ flb_output_upstream_set(ctx->u, ins);
+
+ /* Set manual Index and Type */
+ if (f_index) {
+ ctx->index = flb_strdup(f_index->value);
+ }
+ else {
+ /* Check if the index has been set in the configuration */
+ if (ctx->index) {
+ /* do we have a record accessor pattern ? */
+ if (strchr(ctx->index, '$')) {
+ ctx->ra_index = flb_ra_create(ctx->index, FLB_TRUE);
+ if (!ctx->ra_index) {
+ flb_plg_error(ctx->ins, "invalid record accessor pattern set for 'index' property");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+ }
+ }
+
+ if (f_type) {
+ ctx->type = flb_strdup(f_type->value); /* FIXME */
+ }
+
+ /* HTTP Payload (response) maximum buffer size (0 == unlimited) */
+ if (ctx->buffer_size == -1) {
+ ctx->buffer_size = 0;
+ }
+
+ /* Path */
+ path = flb_output_get_property("path", ins);
+ if (!path) {
+ path = "";
+ }
+
+ /* Pipeline */
+ tmp = flb_output_get_property("pipeline", ins);
+ if (tmp) {
+ snprintf(ctx->uri, sizeof(ctx->uri) - 1, "%s/_bulk/?pipeline=%s", path, tmp);
+ }
+ else {
+ snprintf(ctx->uri, sizeof(ctx->uri) - 1, "%s/_bulk", path);
+ }
+
+
+ if (ctx->id_key) {
+ ctx->ra_id_key = flb_ra_create(ctx->id_key, FLB_FALSE);
+ if (ctx->ra_id_key == NULL) {
+ flb_plg_error(ins, "could not create record accessor for Id Key");
+ }
+ if (ctx->generate_id == FLB_TRUE) {
+ flb_plg_warn(ins, "Generate_ID is ignored when ID_key is set");
+ ctx->generate_id = FLB_FALSE;
+ }
+ }
+
+ if (ctx->write_operation) {
+ if (strcasecmp(ctx->write_operation, FLB_OS_WRITE_OP_INDEX) == 0) {
+ ctx->action = FLB_OS_WRITE_OP_INDEX;
+ }
+ else if (strcasecmp(ctx->write_operation, FLB_OS_WRITE_OP_CREATE) == 0) {
+ ctx->action = FLB_OS_WRITE_OP_CREATE;
+ }
+ else if (strcasecmp(ctx->write_operation, FLB_OS_WRITE_OP_UPDATE) == 0
+ || strcasecmp(ctx->write_operation, FLB_OS_WRITE_OP_UPSERT) == 0) {
+ ctx->action = FLB_OS_WRITE_OP_UPDATE;
+ }
+ else {
+ flb_plg_error(ins,
+ "wrong Write_Operation (should be one of index, "
+ "create, update, upsert)");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ if (strcasecmp(ctx->action, FLB_OS_WRITE_OP_UPDATE) == 0
+ && !ctx->ra_id_key && ctx->generate_id == FLB_FALSE) {
+ flb_plg_error(ins,
+ "id_key or generate_id must be set when Write_Operation "
+ "update or upsert");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ if (ctx->logstash_prefix_key) {
+ if (ctx->logstash_prefix_key[0] != '$') {
+ len = flb_sds_len(ctx->logstash_prefix_key);
+ buf = flb_malloc(len + 2);
+ if (!buf) {
+ flb_errno();
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ buf[0] = '$';
+ memcpy(buf + 1, ctx->logstash_prefix_key, len);
+ buf[len + 1] = '\0';
+
+ ctx->ra_prefix_key = flb_ra_create(buf, FLB_TRUE);
+ flb_free(buf);
+ }
+ else {
+ ctx->ra_prefix_key = flb_ra_create(ctx->logstash_prefix_key, FLB_TRUE);
+ }
+
+ if (!ctx->ra_prefix_key) {
+ flb_plg_error(ins, "invalid logstash_prefix_key pattern '%s'", tmp);
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ if (ctx->compression_str) {
+ if (strcasecmp(ctx->compression_str, "gzip") == 0) {
+ ctx->compression = FLB_OS_COMPRESSION_GZIP;
+ }
+ else {
+ ctx->compression = FLB_OS_COMPRESSION_NONE;
+ }
+ }
+ else {
+ ctx->compression = FLB_OS_COMPRESSION_NONE;
+ }
+
+#ifdef FLB_HAVE_AWS
+ /* AWS Auth Unsigned Headers */
+ ctx->aws_unsigned_headers = flb_malloc(sizeof(struct mk_list));
+ if (!ctx->aws_unsigned_headers) {
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ flb_slist_create(ctx->aws_unsigned_headers);
+ ret = flb_slist_add(ctx->aws_unsigned_headers, "Content-Length");
+ if (ret != 0) {
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* AWS Auth */
+ ctx->has_aws_auth = FLB_FALSE;
+ tmp = flb_output_get_property("aws_auth", ins);
+ if (tmp) {
+ if (strncasecmp(tmp, "On", 2) == 0) {
+ ctx->has_aws_auth = FLB_TRUE;
+ flb_debug("[out_es] Enabled AWS Auth");
+
+ /* AWS provider needs a separate TLS instance */
+ ctx->aws_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->aws_tls) {
+ flb_errno();
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ tmp = flb_output_get_property("aws_region", ins);
+ if (!tmp) {
+ flb_error("[out_es] aws_auth enabled but aws_region not set");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->aws_region = (char *) tmp;
+
+ tmp = flb_output_get_property("aws_sts_endpoint", ins);
+ if (tmp) {
+ ctx->aws_sts_endpoint = (char *) tmp;
+ }
+
+ ctx->aws_provider = flb_standard_chain_provider_create(config,
+ ctx->aws_tls,
+ ctx->aws_region,
+ ctx->aws_sts_endpoint,
+ NULL,
+ flb_aws_client_generator(),
+ ctx->aws_profile);
+ if (!ctx->aws_provider) {
+ flb_error("[out_es] Failed to create AWS Credential Provider");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ tmp = flb_output_get_property("aws_role_arn", ins);
+ if (tmp) {
+ /* Use the STS Provider */
+ ctx->base_aws_provider = ctx->aws_provider;
+ aws_role_arn = (char *) tmp;
+ aws_external_id = NULL;
+ tmp = flb_output_get_property("aws_external_id", ins);
+ if (tmp) {
+ aws_external_id = (char *) tmp;
+ }
+
+ aws_session_name = flb_sts_session_name();
+ if (!aws_session_name) {
+ flb_error("[out_es] Failed to create aws iam role "
+ "session name");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* STS provider needs yet another separate TLS instance */
+ ctx->aws_sts_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->aws_sts_tls) {
+ flb_errno();
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ ctx->aws_provider = flb_sts_provider_create(config,
+ ctx->aws_sts_tls,
+ ctx->
+ base_aws_provider,
+ aws_external_id,
+ aws_role_arn,
+ aws_session_name,
+ ctx->aws_region,
+ ctx->aws_sts_endpoint,
+ NULL,
+ flb_aws_client_generator());
+ /* Session name can be freed once provider is created */
+ flb_free(aws_session_name);
+ if (!ctx->aws_provider) {
+ flb_error("[out_es] Failed to create AWS STS Credential "
+ "Provider");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ }
+
+ /* initialize credentials in sync mode */
+ ctx->aws_provider->provider_vtable->sync(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->init(ctx->aws_provider);
+ /* set back to async */
+ ctx->aws_provider->provider_vtable->async(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->upstream_set(ctx->aws_provider, ctx->ins);
+ }
+ }
+#endif
+
+ return ctx;
+}
+
+int flb_os_conf_destroy(struct flb_opensearch *ctx)
+{
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+ if (ctx->ra_id_key) {
+ flb_ra_destroy(ctx->ra_id_key);
+ ctx->ra_id_key = NULL;
+ }
+
+#ifdef FLB_HAVE_AWS
+ if (ctx->base_aws_provider) {
+ flb_aws_provider_destroy(ctx->base_aws_provider);
+ }
+
+ if (ctx->aws_provider) {
+ flb_aws_provider_destroy(ctx->aws_provider);
+ }
+
+ if (ctx->aws_tls) {
+ flb_tls_destroy(ctx->aws_tls);
+ }
+
+ if (ctx->aws_sts_tls) {
+ flb_tls_destroy(ctx->aws_sts_tls);
+ }
+
+ if (ctx->aws_unsigned_headers) {
+ flb_slist_destroy(ctx->aws_unsigned_headers);
+ flb_free(ctx->aws_unsigned_headers);
+ }
+#endif
+
+ if (ctx->ra_prefix_key) {
+ flb_ra_destroy(ctx->ra_prefix_key);
+ }
+
+ if (ctx->ra_index) {
+ flb_ra_destroy(ctx->ra_index);
+ }
+
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_opensearch/os_conf.h b/src/fluent-bit/plugins/out_opensearch/os_conf.h
new file mode 100644
index 000000000..a48376a07
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opensearch/os_conf.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#ifndef FLB_OUT_OPENSEARCH_CONF_H
#define FLB_OUT_OPENSEARCH_CONF_H

#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_output.h>
#include <fluent-bit/flb_config.h>

#include "opensearch.h"

/*
 * Build the plugin context from the output instance configuration.
 * Returns NULL on error; on success the caller owns the context and
 * must release it with flb_os_conf_destroy().
 */
struct flb_opensearch *flb_os_conf_create(struct flb_output_instance *ins,
                                          struct flb_config *config);
/* Release all resources held by the context; NULL-safe, always returns 0 */
int flb_os_conf_destroy(struct flb_opensearch *ctx);

#endif