diff options
Diffstat (limited to 'src/fluent-bit/plugins/out_cloudwatch_logs')
5 files changed, 2454 insertions, 0 deletions
diff --git a/src/fluent-bit/plugins/out_cloudwatch_logs/CMakeLists.txt b/src/fluent-bit/plugins/out_cloudwatch_logs/CMakeLists.txt new file mode 100644 index 000000000..9e48217aa --- /dev/null +++ b/src/fluent-bit/plugins/out_cloudwatch_logs/CMakeLists.txt @@ -0,0 +1,5 @@ +set(src + cloudwatch_logs.c + cloudwatch_api.c) + +FLB_PLUGIN(out_cloudwatch_logs "${src}" "") diff --git a/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.c b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.c new file mode 100644 index 000000000..8043968cf --- /dev/null +++ b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.c @@ -0,0 +1,1564 @@ +/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ + +/* Fluent Bit + * ========== + * Copyright (C) 2015-2022 The Fluent Bit Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <fluent-bit/flb_compat.h> +#include <fluent-bit/flb_info.h> +#include <fluent-bit/flb_output.h> +#include <fluent-bit/flb_utils.h> +#include <fluent-bit/flb_slist.h> +#include <fluent-bit/flb_time.h> +#include <fluent-bit/flb_pack.h> +#include <fluent-bit/flb_macros.h> +#include <fluent-bit/flb_config_map.h> +#include <fluent-bit/flb_output_plugin.h> +#include <fluent-bit/flb_log_event_decoder.h> + +#include <fluent-bit/flb_sds.h> +#include <fluent-bit/flb_aws_credentials.h> +#include <fluent-bit/flb_aws_util.h> +#include <fluent-bit/flb_mem.h> +#include <fluent-bit/flb_info.h> +#include <fluent-bit/flb_http_client.h> +#include <fluent-bit/flb_utils.h> +#include <fluent-bit/flb_intermediate_metric.h> + +#include <monkey/mk_core.h> +#include <msgpack.h> +#include <string.h> +#include <stdio.h> + +#ifndef FLB_SYSTEM_WINDOWS +#include <unistd.h> +#endif + +#include "cloudwatch_api.h" + +#define ERR_CODE_ALREADY_EXISTS "ResourceAlreadyExistsException" +#define ERR_CODE_INVALID_SEQUENCE_TOKEN "InvalidSequenceTokenException" +#define ERR_CODE_NOT_FOUND "ResourceNotFoundException" +#define ERR_CODE_DATA_ALREADY_ACCEPTED "DataAlreadyAcceptedException" + +#define AMZN_REQUEST_ID_HEADER "x-amzn-RequestId" + +#define ONE_DAY_IN_MILLISECONDS 86400000 +#define FOUR_HOURS_IN_SECONDS 14400 + + +static struct flb_aws_header create_group_header = { + .key = "X-Amz-Target", + .key_len = 12, + .val = "Logs_20140328.CreateLogGroup", + .val_len = 28, +}; + +static struct flb_aws_header put_retention_policy_header = { + .key = "X-Amz-Target", + .key_len = 12, + .val = "Logs_20140328.PutRetentionPolicy", + .val_len = 32, +}; + +static struct flb_aws_header create_stream_header = { + .key = "X-Amz-Target", + .key_len = 12, + .val = "Logs_20140328.CreateLogStream", + .val_len = 29, +}; + +static struct flb_aws_header put_log_events_header[] = { + { + .key = "X-Amz-Target", + .key_len = 12, + .val = "Logs_20140328.PutLogEvents", + .val_len = 26, + }, + { + .key = 
"x-amzn-logs-format", + .key_len = 18, + .val = "", + .val_len = 0, + }, +}; + +int plugin_under_test() +{ + if (getenv("FLB_CLOUDWATCH_PLUGIN_UNDER_TEST") != NULL) { + return FLB_TRUE; + } + + return FLB_FALSE; +} + +char *mock_error_response(char *error_env_var) +{ + char *err_val = NULL; + char *error = NULL; + int len = 0; + + err_val = getenv(error_env_var); + if (err_val != NULL && strlen(err_val) > 0) { + error = flb_malloc(strlen(err_val) + sizeof(char)); + if (error == NULL) { + flb_errno(); + return NULL; + } + + len = strlen(err_val); + memcpy(error, err_val, len); + error[len] = '\0'; + return error; + } + + return NULL; +} + +struct flb_http_client *mock_http_call(char *error_env_var, char *api) +{ + /* create an http client so that we can set the response */ + struct flb_http_client *c = NULL; + char *error = mock_error_response(error_env_var); + + c = flb_calloc(1, sizeof(struct flb_http_client)); + if (!c) { + flb_errno(); + flb_free(error); + return NULL; + } + mk_list_init(&c->headers); + + if (error != NULL) { + c->resp.status = 400; + /* resp.data is freed on destroy, payload is supposed to reference it */ + c->resp.data = error; + c->resp.payload = c->resp.data; + c->resp.payload_size = strlen(error); + } + else { + c->resp.status = 200; + c->resp.payload = ""; + c->resp.payload_size = 0; + if (strcmp(api, "PutLogEvents") == 0) { + /* mocked success response */ + c->resp.payload = "{\"nextSequenceToken\": \"" + "49536701251539826331025683274032969384950891766572122113\"}"; + c->resp.payload_size = strlen(c->resp.payload); + } + else { + c->resp.payload = ""; + c->resp.payload_size = 0; + } + } + + return c; +} + +int compare_events(const void *a_arg, const void *b_arg) +{ + struct cw_event *r_a = (struct cw_event *) a_arg; + struct cw_event *r_b = (struct cw_event *) b_arg; + + if (r_a->timestamp < r_b->timestamp) { + return -1; + } + else if (r_a->timestamp == r_b->timestamp) { + return 0; + } + else { + return 1; + } +} + +static inline int 
try_to_write(char *buf, int *off, size_t left, + const char *str, size_t str_len) +{ + if (str_len <= 0){ + str_len = strlen(str); + } + if (left <= *off+str_len) { + return FLB_FALSE; + } + memcpy(buf+*off, str, str_len); + *off += str_len; + return FLB_TRUE; +} + +/* + * Writes the "header" for a put log events payload + */ +static int init_put_payload(struct flb_cloudwatch *ctx, struct cw_flush *buf, + struct log_stream *stream, int *offset) +{ + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "{\"logGroupName\":\"", 17)) { + goto error; + } + + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + stream->group, 0)) { + goto error; + } + + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "\",\"logStreamName\":\"", 19)) { + goto error; + } + + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + stream->name, 0)) { + goto error; + } + + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "\",", 2)) { + goto error; + } + + if (stream->sequence_token) { + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "\"sequenceToken\":\"", 17)) { + goto error; + } + + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + stream->sequence_token, 0)) { + goto error; + } + + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "\",", 2)) { + goto error; + } + } + + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "\"logEvents\":[", 13)) { + goto error; + } + + return 0; + +error: + return -1; +} + +/* + * Writes a log event to the output buffer + */ +static int write_event(struct flb_cloudwatch *ctx, struct cw_flush *buf, + struct cw_event *event, int *offset) +{ + char ts[50]; + + if (!snprintf(ts, 50, "%llu", event->timestamp)) { + goto error; + } + + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "{\"timestamp\":", 13)) { + goto error; + } + + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + ts, 0)) { + goto error; + } + + if (!try_to_write(buf->out_buf, offset, 
buf->out_buf_size, + ",\"message\":\"", 12)) { + goto error; + } + + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + event->json, event->len)) { + goto error; + } + + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "\"}", 2)) { + goto error; + } + + return 0; + +error: + return -1; +} + +/* Terminates a PutLogEvents payload */ +static int end_put_payload(struct flb_cloudwatch *ctx, struct cw_flush *buf, + int *offset) +{ + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "]}", 2)) { + return -1; + } + buf->out_buf[*offset] = '\0'; + + return 0; +} + +static unsigned long long stream_time_span(struct log_stream *stream, + struct cw_event *event) +{ + if (stream->oldest_event == 0 || stream->newest_event == 0) { + return 0; + } + + if (stream->oldest_event > event->timestamp) { + return stream->newest_event - event->timestamp; + } + else if (stream->newest_event < event->timestamp) { + return event->timestamp - stream->oldest_event; + } + + return stream->newest_event - stream->oldest_event; +} + +/* returns FLB_TRUE if time span is less than 24 hours, FLB_FALSE if greater */ +static int check_stream_time_span(struct log_stream *stream, + struct cw_event *event) +{ + unsigned long long span = stream_time_span(stream, event); + + if (span < ONE_DAY_IN_MILLISECONDS) { + return FLB_TRUE; + } + + return FLB_FALSE; +} + +/* sets the oldest_event and newest_event fields */ +static void set_stream_time_span(struct log_stream *stream, struct cw_event *event) +{ + if (stream->oldest_event == 0 || stream->oldest_event > event->timestamp) { + stream->oldest_event = event->timestamp; + } + + if (stream->newest_event == 0 || stream->newest_event < event->timestamp) { + stream->newest_event = event->timestamp; + } +} + +/* + * Truncate log if needed. 
If truncated, only `written` is modified + * returns FLB_TRUE if truncated + */ +static int truncate_log(const struct flb_cloudwatch *ctx, const char *log_buffer, + size_t *written) { + size_t trailing_backslash_count = 0; + + if (*written > MAX_EVENT_LEN) { + flb_plg_warn(ctx->ins, "[size=%zu] Truncating event which is larger than " + "max size allowed by CloudWatch", *written); + *written = MAX_EVENT_LEN; + + /* remove trailing unescaped backslash if inadvertently synthesized */ + while (trailing_backslash_count < *written && + log_buffer[(*written - 1) - trailing_backslash_count] == '\\') { + trailing_backslash_count++; + } + if (trailing_backslash_count % 2 == 1) { + /* odd number of trailing backslashes, remove unpaired backslash */ + (*written)--; + } + return FLB_TRUE; + } + return FLB_FALSE; +} + + +/* + * Processes the msgpack object + * -1 = failure, record not added + * 0 = success, record added + * 1 = we ran out of space, send and retry + * 2 = record could not be processed, discard it + * Returns 0 on success, -1 on general errors, + * and 1 if we ran out of space to write the event + * which means a send must occur + */ +int process_event(struct flb_cloudwatch *ctx, struct cw_flush *buf, + const msgpack_object *obj, struct flb_time *tms) +{ + size_t written; + int ret; + size_t size; + int offset = 0; + struct cw_event *event; + char *tmp_buf_ptr; + + tmp_buf_ptr = buf->tmp_buf + buf->tmp_buf_offset; + ret = flb_msgpack_to_json(tmp_buf_ptr, + buf->tmp_buf_size - buf->tmp_buf_offset, + obj); + if (ret <= 0) { + /* + * failure to write to buffer, + * which means we ran out of space, and must send the logs + */ + return 1; + } + written = (size_t) ret; + /* Discard empty messages (written == 2 means '""') */ + if (written <= 2) { + flb_plg_debug(ctx->ins, "Found empty log message"); + return 2; + } + + /* the json string must be escaped, unless the log_key option is used */ + if (ctx->log_key == NULL) { + /* + * check if event_buf is initialized and big 
enough + * If all chars need to be hex encoded (impossible), 6x space would be + * needed + */ + size = written * 6; + if (buf->event_buf == NULL || buf->event_buf_size < size) { + flb_free(buf->event_buf); + buf->event_buf = flb_malloc(size); + buf->event_buf_size = size; + if (buf->event_buf == NULL) { + flb_errno(); + return -1; + } + } + offset = 0; + if (!flb_utils_write_str(buf->event_buf, &offset, size, + tmp_buf_ptr, written)) { + return -1; + } + written = offset; + + tmp_buf_ptr = buf->tmp_buf + buf->tmp_buf_offset; + if ((buf->tmp_buf_size - buf->tmp_buf_offset) < written) { + /* not enough space, send logs */ + return 1; + } + + /* truncate log, if needed */ + truncate_log(ctx, buf->event_buf, &written); + + /* copy serialized json to tmp_buf */ + if (!strncpy(tmp_buf_ptr, buf->event_buf, written)) { + return -1; + } + } + else { + /* + * flb_msgpack_to_json will encase the value in quotes + * We don't want that for log_key, so we ignore the first + * and last character + */ + written -= 2; + tmp_buf_ptr++; /* pass over the opening quote */ + buf->tmp_buf_offset++; /* advance tmp_buf past opening quote */ + + /* truncate log, if needed */ + truncate_log(ctx, tmp_buf_ptr, &written); + } + + /* add log to events list */ + buf->tmp_buf_offset += written; + event = &buf->events[buf->event_index]; + event->json = tmp_buf_ptr; + event->len = written; + event->timestamp = (unsigned long long) (tms->tm.tv_sec * 1000ull + + tms->tm.tv_nsec/1000000); + + return 0; +} + +/* Resets or inits a cw_flush struct */ +void reset_flush_buf(struct flb_cloudwatch *ctx, struct cw_flush *buf) { + buf->event_index = 0; + buf->tmp_buf_offset = 0; + buf->event_index = 0; + buf->data_size = PUT_LOG_EVENTS_HEADER_LEN + PUT_LOG_EVENTS_FOOTER_LEN; + if (buf->current_stream != NULL) { + buf->data_size += strlen(buf->current_stream->name); + buf->data_size += strlen(buf->current_stream->group); + if (buf->current_stream->sequence_token) { + buf->data_size += 
strlen(buf->current_stream->sequence_token); + } + } +} + +/* sorts events, constructs a put payload, and then sends */ +int send_log_events(struct flb_cloudwatch *ctx, struct cw_flush *buf) { + int ret; + int offset; + int i; + struct cw_event *event; + + if (buf->event_index <= 0) { + return 0; + } + + /* events must be sorted by timestamp in a put payload */ + qsort(buf->events, buf->event_index, sizeof(struct cw_event), compare_events); + +retry: + buf->current_stream->newest_event = 0; + buf->current_stream->oldest_event = 0; + + offset = 0; + ret = init_put_payload(ctx, buf, buf->current_stream, &offset); + if (ret < 0) { + flb_plg_error(ctx->ins, "Failed to initialize PutLogEvents payload"); + return -1; + } + + for (i = 0; i < buf->event_index; i++) { + event = &buf->events[i]; + ret = write_event(ctx, buf, event, &offset); + if (ret < 0) { + flb_plg_error(ctx->ins, "Failed to write log event %d to " + "payload buffer", i); + return -1; + } + if (i != (buf->event_index - 1)) { + if (!try_to_write(buf->out_buf, &offset, buf->out_buf_size, + ",", 1)) { + flb_plg_error(ctx->ins, "Could not terminate log event with ','"); + return -1; + } + } + } + + ret = end_put_payload(ctx, buf, &offset); + if (ret < 0) { + flb_plg_error(ctx->ins, "Could not complete PutLogEvents payload"); + return -1; + } + + flb_plg_debug(ctx->ins, "cloudwatch:PutLogEvents: events=%d, payload=%d bytes", i, offset); + ret = put_log_events(ctx, buf, buf->current_stream, (size_t) offset); + if (ret < 0) { + flb_plg_error(ctx->ins, "Failed to send log events"); + return -1; + } + else if (ret > 0) { + goto retry; + } + + return 0; +} + + /* + * Processes the msgpack object, sends the current batch if needed + * -1 = failure, event not added + * 0 = success, event added + * 1 = event been skipped + * Returns 0 on success, -1 on general errors, + * and 1 if we found a empty event or a large event. 
+ */ +int add_event(struct flb_cloudwatch *ctx, struct cw_flush *buf, + struct log_stream *stream, + const msgpack_object *obj, struct flb_time *tms) +{ + int ret; + struct cw_event *event; + int retry_add = FLB_FALSE; + int event_bytes = 0; + + if (buf->event_index > 0 && buf->current_stream != stream) { + /* we already have events for a different stream, send them first */ + retry_add = FLB_TRUE; + goto send; + } + +retry_add_event: + buf->current_stream = stream; + retry_add = FLB_FALSE; + if (buf->event_index == 0) { + /* init */ + reset_flush_buf(ctx, buf); + } + + ret = process_event(ctx, buf, obj, tms); + if (ret < 0) { + return -1; + } + else if (ret == 1) { + if (buf->event_index <= 0) { + /* somehow the record was larger than our entire request buffer */ + flb_plg_warn(ctx->ins, "Discarding massive log record"); + return 1; /* discard this record and return to caller */ + } + /* send logs and then retry the add */ + retry_add = FLB_TRUE; + goto send; + } + else if (ret == 2) { + /* + * discard this record and return to caller + * only happens for empty records in this plugin + */ + return 1; + } + + event = &buf->events[buf->event_index]; + event_bytes = event->len + PUT_LOG_EVENTS_PER_EVENT_LEN; + + if (check_stream_time_span(stream, event) == FLB_FALSE) { + /* do not send this event */ + retry_add = FLB_TRUE; + goto send; + } + + if ((buf->data_size + event_bytes) > PUT_LOG_EVENTS_PAYLOAD_SIZE) { + if (buf->event_index <= 0) { + /* somehow the record was larger than our entire request buffer */ + flb_plg_warn(ctx->ins, "Discarding massive log record"); + return 0; /* discard this record and return to caller */ + } + /* do not send this event */ + retry_add = FLB_TRUE; + goto send; + } + + buf->data_size += event_bytes; + set_stream_time_span(stream, event); + buf->event_index++; + + if (buf->event_index == MAX_EVENTS_PER_PUT) { + goto send; + } + + /* send is not needed yet, return to caller */ + return 0; + +send: + ret = send_log_events(ctx, buf); + 
reset_flush_buf(ctx, buf); + if (ret < 0) { + return -1; + } + + if (retry_add == FLB_TRUE) { + goto retry_add_event; + } + + return 0; +} + +int should_add_to_emf(struct flb_intermediate_metric *an_item) +{ + /* Valid for cpu plugin */ + if (strncmp(an_item->key.via.str.ptr, "cpu_", 4) == 0 + || strncmp(an_item->key.via.str.ptr, "user_p", 6) == 0 + || strncmp(an_item->key.via.str.ptr, "system_p", 8) == 0) { + return 1; + } + + /* Valid for mem plugin */ + if (strncmp(an_item->key.via.str.ptr, "Mem.total", 9) == 0 + || strncmp(an_item->key.via.str.ptr, "Mem.used", 8) == 0 + || strncmp(an_item->key.via.str.ptr, "Mem.free", 8) == 0 + || strncmp(an_item->key.via.str.ptr, "Swap.total", 10) == 0 + || strncmp(an_item->key.via.str.ptr, "Swap.used", 9) == 0 + || strncmp(an_item->key.via.str.ptr, "Swap.free", 9) == 0) { + return 1; + } + + return 0; +} + +int pack_emf_payload(struct flb_cloudwatch *ctx, + struct mk_list *flb_intermediate_metrics, + const char *input_plugin, + struct flb_time tms, + msgpack_sbuffer *mp_sbuf, + msgpack_unpacked *mp_result, + msgpack_object *emf_payload) +{ + int total_items = mk_list_size(flb_intermediate_metrics) + 1; + + struct mk_list *metric_temp; + struct mk_list *metric_head; + struct flb_intermediate_metric *an_item; + msgpack_unpack_return mp_ret; + + /* Serialize values into the buffer using msgpack_sbuffer_write */ + msgpack_packer mp_pck; + msgpack_packer_init(&mp_pck, mp_sbuf, msgpack_sbuffer_write); + msgpack_pack_map(&mp_pck, total_items); + + /* Pack the _aws map */ + msgpack_pack_str(&mp_pck, 4); + msgpack_pack_str_body(&mp_pck, "_aws", 4); + + msgpack_pack_map(&mp_pck, 2); + + msgpack_pack_str(&mp_pck, 9); + msgpack_pack_str_body(&mp_pck, "Timestamp", 9); + msgpack_pack_long_long(&mp_pck, tms.tm.tv_sec * 1000L); + + msgpack_pack_str(&mp_pck, 17); + msgpack_pack_str_body(&mp_pck, "CloudWatchMetrics", 17); + msgpack_pack_array(&mp_pck, 1); + + msgpack_pack_map(&mp_pck, 3); + + msgpack_pack_str(&mp_pck, 9); + 
msgpack_pack_str_body(&mp_pck, "Namespace", 9); + + if (ctx->metric_namespace) { + msgpack_pack_str(&mp_pck, flb_sds_len(ctx->metric_namespace)); + msgpack_pack_str_body(&mp_pck, ctx->metric_namespace, + flb_sds_len(ctx->metric_namespace)); + } + else { + msgpack_pack_str(&mp_pck, 18); + msgpack_pack_str_body(&mp_pck, "fluent-bit-metrics", 18); + } + + msgpack_pack_str(&mp_pck, 10); + msgpack_pack_str_body(&mp_pck, "Dimensions", 10); + + struct mk_list *head, *inner_head; + struct flb_split_entry *dimension_list, *entry; + struct mk_list *csv_values; + if (ctx->metric_dimensions) { + msgpack_pack_array(&mp_pck, mk_list_size(ctx->metric_dimensions)); + + mk_list_foreach(head, ctx->metric_dimensions) { + dimension_list = mk_list_entry(head, struct flb_split_entry, _head); + csv_values = flb_utils_split(dimension_list->value, ',', 256); + msgpack_pack_array(&mp_pck, mk_list_size(csv_values)); + + mk_list_foreach(inner_head, csv_values) { + entry = mk_list_entry(inner_head, struct flb_split_entry, _head); + msgpack_pack_str(&mp_pck, entry->len); + msgpack_pack_str_body(&mp_pck, entry->value, entry->len); + } + flb_utils_split_free(csv_values); + } + } + else { + msgpack_pack_array(&mp_pck, 0); + } + + msgpack_pack_str(&mp_pck, 7); + msgpack_pack_str_body(&mp_pck, "Metrics", 7); + + if (strcmp(input_plugin, "cpu") == 0) { + msgpack_pack_array(&mp_pck, 3); + } + else if (strcmp(input_plugin, "mem") == 0) { + msgpack_pack_array(&mp_pck, 6); + } + else { + msgpack_pack_array(&mp_pck, 0); + } + + mk_list_foreach_safe(metric_head, metric_temp, flb_intermediate_metrics) { + an_item = mk_list_entry(metric_head, struct flb_intermediate_metric, _head); + if (should_add_to_emf(an_item) == 1) { + msgpack_pack_map(&mp_pck, 2); + msgpack_pack_str(&mp_pck, 4); + msgpack_pack_str_body(&mp_pck, "Name", 4); + msgpack_pack_object(&mp_pck, an_item->key); + msgpack_pack_str(&mp_pck, 4); + msgpack_pack_str_body(&mp_pck, "Unit", 4); + msgpack_pack_str(&mp_pck, strlen(an_item->metric_unit)); 
+ msgpack_pack_str_body(&mp_pck, an_item->metric_unit, + strlen(an_item->metric_unit)); + } + } + + /* Pack the metric values for each record */ + mk_list_foreach_safe(metric_head, metric_temp, flb_intermediate_metrics) { + an_item = mk_list_entry(metric_head, struct flb_intermediate_metric, _head); + msgpack_pack_object(&mp_pck, an_item->key); + msgpack_pack_object(&mp_pck, an_item->value); + } + + /* + * Deserialize the buffer into msgpack_object instance. + */ + + mp_ret = msgpack_unpack_next(mp_result, mp_sbuf->data, mp_sbuf->size, NULL); + + if (mp_ret != MSGPACK_UNPACK_SUCCESS) { + flb_plg_error(ctx->ins, "msgpack_unpack returned non-success value %i", mp_ret); + return -1; + } + + *emf_payload = mp_result->data; + return 0; +} + +/* + * Main routine- processes msgpack and sends in batches which ignore the empty ones + * return value is the number of events processed and send. + */ +int process_and_send(struct flb_cloudwatch *ctx, const char *input_plugin, + struct cw_flush *buf, flb_sds_t tag, + const char *data, size_t bytes) +{ + int i = 0; + size_t map_size; + msgpack_object map; + msgpack_object_kv *kv; + msgpack_object key; + msgpack_object val; + msgpack_unpacked mp_emf_result; + msgpack_object emf_payload; + /* msgpack::sbuffer is a simple buffer implementation. 
*/ + msgpack_sbuffer mp_sbuf; + + struct log_stream *stream; + + char *key_str = NULL; + size_t key_str_size = 0; + int j; + int ret; + int check = FLB_FALSE; + int found = FLB_FALSE; + + /* Added for EMF support */ + struct flb_intermediate_metric *metric; + struct mk_list *tmp; + struct mk_list *head; + struct flb_intermediate_metric *an_item; + + int intermediate_metric_type; + char *intermediate_metric_unit; + struct flb_log_event_decoder log_decoder; + struct flb_log_event log_event; + + ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes); + + if (ret != FLB_EVENT_DECODER_SUCCESS) { + flb_plg_error(ctx->ins, + "Log event decoder initialization error : %d", ret); + + return -1; + } + + if (strncmp(input_plugin, "cpu", 3) == 0) { + intermediate_metric_type = GAUGE; + intermediate_metric_unit = PERCENT; + } + else if (strncmp(input_plugin, "mem", 3) == 0) { + intermediate_metric_type = GAUGE; + intermediate_metric_unit = BYTES; + } + + /* unpack msgpack */ + while ((ret = flb_log_event_decoder_next( + &log_decoder, + &log_event)) == FLB_EVENT_DECODER_SUCCESS) { + + /* Get the record/map */ + map = *log_event.body; + map_size = map.via.map.size; + + stream = get_log_stream(ctx, tag, map); + if (!stream) { + flb_plg_debug(ctx->ins, "Couldn't determine log group & stream for record with tag %s", tag); + goto error; + } + + if (ctx->log_key) { + key_str = NULL; + key_str_size = 0; + check = FLB_FALSE; + found = FLB_FALSE; + + kv = map.via.map.ptr; + + for(j=0; j < map_size; j++) { + key = (kv+j)->key; + if (key.type == MSGPACK_OBJECT_BIN) { + key_str = (char *) key.via.bin.ptr; + key_str_size = key.via.bin.size; + check = FLB_TRUE; + } + if (key.type == MSGPACK_OBJECT_STR) { + key_str = (char *) key.via.str.ptr; + key_str_size = key.via.str.size; + check = FLB_TRUE; + } + + if (check == FLB_TRUE) { + if (strncmp(ctx->log_key, key_str, key_str_size) == 0) { + found = FLB_TRUE; + val = (kv+j)->val; + ret = add_event(ctx, buf, stream, &val, + 
&log_event.timestamp); + if (ret < 0 ) { + goto error; + } + } + } + + } + if (found == FLB_FALSE) { + flb_plg_error(ctx->ins, "Could not find log_key '%s' in record", + ctx->log_key); + } + + if (ret == 0) { + i++; + } + + continue; + } + + if (strncmp(input_plugin, "cpu", 3) == 0 + || strncmp(input_plugin, "mem", 3) == 0) { + /* Added for EMF support: Construct a list */ + struct mk_list flb_intermediate_metrics; + mk_list_init(&flb_intermediate_metrics); + + kv = map.via.map.ptr; + + /* + * Iterate through the record map, extract intermediate metric data, + * and add to the list. + */ + for (i = 0; i < map_size; i++) { + metric = flb_calloc(1, sizeof(struct flb_intermediate_metric)); + if (!metric) { + goto error; + } + + metric->key = (kv + i)->key; + metric->value = (kv + i)->val; + metric->metric_type = intermediate_metric_type; + metric->metric_unit = intermediate_metric_unit; + metric->timestamp = log_event.timestamp; + + mk_list_add(&metric->_head, &flb_intermediate_metrics); + + } + + /* The msgpack object is only valid during the lifetime of the + * sbuffer & the unpacked result. + */ + msgpack_sbuffer_init(&mp_sbuf); + msgpack_unpacked_init(&mp_emf_result); + + ret = pack_emf_payload(ctx, + &flb_intermediate_metrics, + input_plugin, + log_event.timestamp, + &mp_sbuf, + &mp_emf_result, + &emf_payload); + + /* free the intermediate metric list */ + + mk_list_foreach_safe(head, tmp, &flb_intermediate_metrics) { + an_item = mk_list_entry(head, struct flb_intermediate_metric, _head); + mk_list_del(&an_item->_head); + flb_free(an_item); + } + + if (ret != 0) { + flb_plg_error(ctx->ins, "Failed to convert EMF metrics to msgpack object. 
ret=%i", ret); + msgpack_unpacked_destroy(&mp_emf_result); + msgpack_sbuffer_destroy(&mp_sbuf); + goto error; + } + ret = add_event(ctx, buf, stream, &emf_payload, + &log_event.timestamp); + + msgpack_unpacked_destroy(&mp_emf_result); + msgpack_sbuffer_destroy(&mp_sbuf); + + } else { + ret = add_event(ctx, buf, stream, &map, + &log_event.timestamp); + } + + if (ret < 0 ) { + goto error; + } + + if (ret == 0) { + i++; + } + } + flb_log_event_decoder_destroy(&log_decoder); + + /* send any remaining events */ + ret = send_log_events(ctx, buf); + reset_flush_buf(ctx, buf); + if (ret < 0) { + return -1; + } + + /* return number of events */ + return i; + +error: + flb_log_event_decoder_destroy(&log_decoder); + + return -1; +} + +struct log_stream *get_or_create_log_stream(struct flb_cloudwatch *ctx, + flb_sds_t stream_name, + flb_sds_t group_name) +{ + int ret; + struct log_stream *new_stream; + struct log_stream *stream; + struct mk_list *tmp; + struct mk_list *head; + time_t now; + + /* check if the stream already exists */ + now = time(NULL); + mk_list_foreach_safe(head, tmp, &ctx->streams) { + stream = mk_list_entry(head, struct log_stream, _head); + if (strcmp(stream_name, stream->name) == 0 && strcmp(group_name, stream->group) == 0) { + return stream; + } + else { + /* check if stream is expired, if so, clean it up */ + if (stream->expiration < now) { + mk_list_del(&stream->_head); + log_stream_destroy(stream); + } + } + } + + /* create the new stream */ + new_stream = flb_calloc(1, sizeof(struct log_stream)); + if (!new_stream) { + flb_errno(); + return NULL; + } + new_stream->name = flb_sds_create(stream_name); + if (new_stream->name == NULL) { + flb_errno(); + return NULL; + } + new_stream->group = flb_sds_create(group_name); + if (new_stream->group == NULL) { + flb_errno(); + return NULL; + } + + ret = create_log_stream(ctx, new_stream, FLB_TRUE); + if (ret < 0) { + log_stream_destroy(new_stream); + return NULL; + } + new_stream->expiration = time(NULL) + 
FOUR_HOURS_IN_SECONDS; + + mk_list_add(&new_stream->_head, &ctx->streams); + return new_stream; +} + +struct log_stream *get_log_stream(struct flb_cloudwatch *ctx, flb_sds_t tag, + const msgpack_object map) +{ + flb_sds_t group_name = NULL; + flb_sds_t stream_name = NULL; + flb_sds_t tmp_s = NULL; + int free_group = FLB_FALSE; + int free_stream = FLB_FALSE; + struct log_stream *stream; + + /* templates take priority */ + if (ctx->ra_stream) { + stream_name = flb_ra_translate_check(ctx->ra_stream, tag, flb_sds_len(tag), + map, NULL, FLB_TRUE); + } + + if (ctx->ra_group) { + group_name = flb_ra_translate_check(ctx->ra_group, tag, flb_sds_len(tag), + map, NULL, FLB_TRUE); + } + + if (stream_name == NULL) { + if (ctx->stream_name) { + stream_name = ctx->stream_name; + } else { + free_stream = FLB_TRUE; + /* use log_stream_prefix */ + stream_name = flb_sds_create(ctx->log_stream_prefix); + if (!stream_name) { + flb_errno(); + if (group_name) { + flb_sds_destroy(group_name); + } + return NULL; + } + + tmp_s = flb_sds_cat(stream_name, tag, flb_sds_len(tag)); + if (!tmp_s) { + flb_errno(); + flb_sds_destroy(stream_name); + if (group_name) { + flb_sds_destroy(group_name); + } + return NULL; + } + stream_name = tmp_s; + } + } else { + free_stream = FLB_TRUE; + } + + if (group_name == NULL) { + group_name = ctx->group_name; + } else { + free_group = FLB_TRUE; + } + + flb_plg_debug(ctx->ins, "Using stream=%s, group=%s", stream_name, group_name); + + stream = get_or_create_log_stream(ctx, stream_name, group_name); + + if (free_group == FLB_TRUE) { + flb_sds_destroy(group_name); + } + if (free_stream == FLB_TRUE) { + flb_sds_destroy(stream_name); + } + return stream; +} + + +static int set_log_group_retention(struct flb_cloudwatch *ctx, struct log_stream *stream) +{ + if (ctx->log_retention_days <= 0) { + /* no need to set */ + return 0; + } + + struct flb_http_client *c = NULL; + struct flb_aws_client *cw_client; + flb_sds_t body; + flb_sds_t tmp; + flb_sds_t error; + + 
flb_plg_info(ctx->ins, "Setting retention policy on log group %s to %dd", stream->group, ctx->log_retention_days); + + body = flb_sds_create_size(68 + strlen(stream->group)); + if (!body) { + flb_sds_destroy(body); + flb_errno(); + return -1; + } + + /* construct CreateLogGroup request body */ + tmp = flb_sds_printf(&body, "{\"logGroupName\":\"%s\",\"retentionInDays\":%d}", stream->group, ctx->log_retention_days); + if (!tmp) { + flb_sds_destroy(body); + flb_errno(); + return -1; + } + body = tmp; + + if (plugin_under_test() == FLB_TRUE) { + c = mock_http_call("TEST_PUT_RETENTION_POLICY_ERROR", "PutRetentionPolicy"); + } + else { + cw_client = ctx->cw_client; + c = cw_client->client_vtable->request(cw_client, FLB_HTTP_POST, + "/", body, strlen(body), + &put_retention_policy_header, 1); + } + + if (c) { + flb_plg_debug(ctx->ins, "PutRetentionPolicy http status=%d", c->resp.status); + + if (c->resp.status == 200) { + /* success */ + flb_plg_info(ctx->ins, "Set retention policy to %d", ctx->log_retention_days); + flb_sds_destroy(body); + flb_http_client_destroy(c); + return 0; + } + + /* Check error */ + if (c->resp.payload_size > 0) { + error = flb_aws_error(c->resp.payload, c->resp.payload_size); + if (error != NULL) { + /* some other error occurred; notify user */ + flb_aws_print_error(c->resp.payload, c->resp.payload_size, + "PutRetentionPolicy", ctx->ins); + flb_sds_destroy(error); + } + else { + /* error can not be parsed, print raw response to debug */ + flb_plg_debug(ctx->ins, "Raw response: %s", c->resp.payload); + } + } + } + + flb_plg_error(ctx->ins, "Failed to putRetentionPolicy"); + if (c) { + flb_http_client_destroy(c); + } + flb_sds_destroy(body); + + return -1; +} + +int create_log_group(struct flb_cloudwatch *ctx, struct log_stream *stream) +{ + struct flb_http_client *c = NULL; + struct flb_aws_client *cw_client; + flb_sds_t body; + flb_sds_t tmp; + flb_sds_t error; + int ret; + + flb_plg_info(ctx->ins, "Creating log group %s", stream->group); + + 
    body = flb_sds_create_size(25 + strlen(stream->group));
    if (!body) {
        flb_sds_destroy(body);
        flb_errno();
        return -1;
    }

    /* construct CreateLogGroup request body */
    tmp = flb_sds_printf(&body, "{\"logGroupName\":\"%s\"}", stream->group);
    if (!tmp) {
        flb_sds_destroy(body);
        flb_errno();
        return -1;
    }
    body = tmp;

    if (plugin_under_test() == FLB_TRUE) {
        /* unit-test mode: responses are mocked, no network call is made */
        c = mock_http_call("TEST_CREATE_LOG_GROUP_ERROR", "CreateLogGroup");
    }
    else {
        cw_client = ctx->cw_client;
        c = cw_client->client_vtable->request(cw_client, FLB_HTTP_POST,
                                              "/", body, strlen(body),
                                              &create_group_header, 1);
    }

    if (c) {
        flb_plg_debug(ctx->ins, "CreateLogGroup http status=%d", c->resp.status);

        if (c->resp.status == 200) {
            /* success */
            flb_plg_info(ctx->ins, "Created log group %s", stream->group);
            flb_sds_destroy(body);
            flb_http_client_destroy(c);
            /* newly created group: apply retention policy (no-op if unset) */
            ret = set_log_group_retention(ctx, stream);
            return ret;
        }

        /* Check error */
        if (c->resp.payload_size > 0) {
            error = flb_aws_error(c->resp.payload, c->resp.payload_size);
            if (error != NULL) {
                if (strcmp(error, ERR_CODE_ALREADY_EXISTS) == 0) {
                    /* group already present: counts as success */
                    flb_plg_info(ctx->ins, "Log Group %s already exists",
                                 stream->group);
                    flb_sds_destroy(body);
                    flb_sds_destroy(error);
                    flb_http_client_destroy(c);
                    ret = set_log_group_retention(ctx, stream);
                    return ret;
                }
                /* some other error occurred; notify user */
                flb_aws_print_error(c->resp.payload, c->resp.payload_size,
                                    "CreateLogGroup", ctx->ins);
                flb_sds_destroy(error);
            }
            else {
                /* error can not be parsed, print raw response to debug */
                flb_plg_debug(ctx->ins, "Raw response: %s", c->resp.payload);
            }
        }
    }

    flb_plg_error(ctx->ins, "Failed to create log group");
    if (c) {
        flb_http_client_destroy(c);
    }
    flb_sds_destroy(body);
    return -1;
}

/*
 * Create the log stream via the CreateLogStream API. If the group is missing
 * and `auto_create_group` is enabled, creates the group and retries once
 * (`can_retry` bounds the recursion to a single retry).
 * Returns 0 on success (including "already exists"), -1 on failure.
 */
int create_log_stream(struct flb_cloudwatch *ctx, struct log_stream *stream,
                      int can_retry)
{

    struct flb_http_client *c = NULL;
    struct flb_aws_client *cw_client;
    flb_sds_t body;
    flb_sds_t tmp;
    flb_sds_t error;
    int ret;

    flb_plg_info(ctx->ins, "Creating log stream %s in log group %s",
                 stream->name, stream->group);

    body = flb_sds_create_size(50 + strlen(stream->group) +
                               strlen(stream->name));
    if (!body) {
        flb_sds_destroy(body);
        flb_errno();
        return -1;
    }

    /* construct CreateLogStream request body */
    tmp = flb_sds_printf(&body,
                         "{\"logGroupName\":\"%s\",\"logStreamName\":\"%s\"}",
                         stream->group,
                         stream->name);
    if (!tmp) {
        flb_sds_destroy(body);
        flb_errno();
        return -1;
    }
    body = tmp;

    cw_client = ctx->cw_client;
    if (plugin_under_test() == FLB_TRUE) {
        /* unit-test mode: responses are mocked, no network call is made */
        c = mock_http_call("TEST_CREATE_LOG_STREAM_ERROR", "CreateLogStream");
    }
    else {
        c = cw_client->client_vtable->request(cw_client, FLB_HTTP_POST,
                                              "/", body, strlen(body),
                                              &create_stream_header, 1);
    }

    if (c) {
        flb_plg_debug(ctx->ins,"CreateLogStream http status=%d",
                      c->resp.status);

        if (c->resp.status == 200) {
            /* success */
            flb_plg_info(ctx->ins, "Created log stream %s", stream->name);
            flb_sds_destroy(body);
            flb_http_client_destroy(c);
            return 0;
        }

        /* Check error */
        if (c->resp.payload_size > 0) {
            error = flb_aws_error(c->resp.payload, c->resp.payload_size);
            if (error != NULL) {
                if (strcmp(error, ERR_CODE_ALREADY_EXISTS) == 0) {
                    /* stream already present: counts as success */
                    flb_plg_info(ctx->ins, "Log Stream %s already exists",
                                 stream->name);
                    flb_sds_destroy(body);
                    flb_sds_destroy(error);
                    flb_http_client_destroy(c);
                    return 0;
                }

                if (strcmp(error, ERR_CODE_NOT_FOUND) == 0) {
                    /* the log group does not exist yet */
                    flb_sds_destroy(body);
                    flb_sds_destroy(error);
                    flb_http_client_destroy(c);

                    if (ctx->create_group == FLB_TRUE) {
                        flb_plg_info(ctx->ins, "Log Group %s not found. Will attempt to create it.",
                                     stream->group);
                        ret = create_log_group(ctx, stream);
                        if (ret < 0) {
                            return -1;
                        } else {
                            if (can_retry == FLB_TRUE) {
                                /* retry stream creation (only one level deep) */
                                return create_log_stream(ctx, stream, FLB_FALSE);
                            } else {
                                /* we failed to create the stream */
                                return -1;
                            }
                        }
                    } else {
                        flb_plg_error(ctx->ins, "Log Group %s not found and `auto_create_group` disabled.",
                                      stream->group);
                    }
                    return -1;
                }
                /* some other error occurred; notify user */
                flb_aws_print_error(c->resp.payload, c->resp.payload_size,
                                    "CreateLogStream", ctx->ins);
                flb_sds_destroy(error);
            }
            else {
                /* error can not be parsed, print raw response to debug */
                flb_plg_debug(ctx->ins, "Raw response: %s", c->resp.payload);
            }
        }
    }

    flb_plg_error(ctx->ins, "Failed to create log stream");
    if (c) {
        flb_http_client_destroy(c);
    }
    flb_sds_destroy(body);
    return -1;
}

/*
 * Returns -1 on failure, 0 on success, and 1 for a sequence token error,
 * which means the caller can retry.
 */
/*
 * Send the serialized events in buf->out_buf (payload_size bytes) to the
 * stream via the PutLogEvents API. On success, stores the returned
 * nextSequenceToken on the stream for the next call. A single immediate
 * retry is issued for an invalid 200 response (missing request-id header).
 */
int put_log_events(struct flb_cloudwatch *ctx, struct cw_flush *buf,
                   struct log_stream *stream, size_t payload_size)
{

    struct flb_http_client *c = NULL;
    struct flb_aws_client *cw_client;
    flb_sds_t tmp;
    flb_sds_t error;
    int num_headers = 1;
    int retry = FLB_TRUE;

    flb_plg_debug(ctx->ins, "Sending log events to log stream %s", stream->name);

    /* stream is being used, update expiration */
    stream->expiration = time(NULL) + FOUR_HOURS_IN_SECONDS;

    if (ctx->log_format != NULL) {
        /* optional x-amzn-logs-format header (e.g. json/emf) */
        put_log_events_header[1].val = (char *) ctx->log_format;
        put_log_events_header[1].val_len = strlen(ctx->log_format);
        num_headers = 2;
    }

retry_request:
    if (plugin_under_test() == FLB_TRUE) {
        /* unit-test mode: responses are mocked, no network call is made */
        c = mock_http_call("TEST_PUT_LOG_EVENTS_ERROR", "PutLogEvents");
    }
    else {
        cw_client = ctx->cw_client;
        c = cw_client->client_vtable->request(cw_client, FLB_HTTP_POST,
                                              "/", buf->out_buf, payload_size,
                                              put_log_events_header, num_headers);
    }

    if (c) {
        flb_plg_debug(ctx->ins, "PutLogEvents http status=%d", c->resp.status);

        if (c->resp.status == 200) {
            /*
             * A real 200 always carries the x-amzn-RequestId header; without
             * it the response is bogus (e.g. truncated by a proxy).
             */
            if (c->resp.data == NULL || c->resp.data_len == 0 || strstr(c->resp.data, AMZN_REQUEST_ID_HEADER) == NULL) {
                /* code was 200, but response is invalid, treat as failure */
                if (c->resp.data != NULL) {
                    flb_plg_debug(ctx->ins, "Could not find sequence token in "
                                  "response: response body is empty: full data: `%.*s`", c->resp.data_len, c->resp.data);
                }
                flb_http_client_destroy(c);

                if (retry == FLB_TRUE) {
                    /* one immediate in-function retry, then give up */
                    flb_plg_debug(ctx->ins, "issuing immediate retry for invalid response");
                    retry = FLB_FALSE;
                    goto retry_request;
                }
                flb_plg_error(ctx->ins, "Recieved code 200 but response was invalid, %s header not found",
                              AMZN_REQUEST_ID_HEADER);
                return -1;
            }


            /* success */
            if (c->resp.payload_size > 0) {
                flb_plg_debug(ctx->ins, "Sent events to %s", stream->name);
                /* remember the next token CloudWatch expects for this stream */
                tmp = flb_json_get_val(c->resp.payload, c->resp.payload_size,
                                       "nextSequenceToken");
                if (tmp) {
                    if (stream->sequence_token != NULL) {
                        flb_sds_destroy(stream->sequence_token);
                    }
                    stream->sequence_token = tmp;

                    flb_http_client_destroy(c);
                    return 0;
                }
                else {
                    /* still a success; next call will recover via the
                     * InvalidSequenceTokenException path */
                    flb_plg_error(ctx->ins, "Could not find sequence token in "
                                  "response: %s", c->resp.payload);
                }
            }

            flb_http_client_destroy(c);
            return 0;
        }

        /* Check error */
        if (c->resp.payload_size > 0) {
            error = flb_aws_error(c->resp.payload, c->resp.payload_size);
            if (error != NULL) {
                if (strcmp(error, ERR_CODE_INVALID_SEQUENCE_TOKEN) == 0) {
                    /*
                     * This case will happen when we do not know the correct
                     * sequence token; we can find it in the error response
                     * and retry.
                     */
                    flb_plg_debug(ctx->ins, "Sequence token was invalid, "
                                  "will retry");
                    tmp = flb_json_get_val(c->resp.payload, c->resp.payload_size,
                                           "expectedSequenceToken");
                    if (tmp) {
                        if (stream->sequence_token != NULL) {
                            flb_sds_destroy(stream->sequence_token);
                        }
                        stream->sequence_token = tmp;
                        flb_sds_destroy(error);
                        flb_http_client_destroy(c);
                        /* tell the caller to retry */
                        return 1;
                    }
                } else if (strcmp(error, ERR_CODE_DATA_ALREADY_ACCEPTED) == 0) {
                    /* not sure what causes this but it counts as success */
                    flb_plg_info(ctx->ins, "Got %s, a previous retry must have succeeded asychronously", ERR_CODE_DATA_ALREADY_ACCEPTED);
                    flb_sds_destroy(error);
                    flb_http_client_destroy(c);
                    /* success */
                    return 0;
                }
                /* some other error occurred; notify user */
                flb_aws_print_error(c->resp.payload, c->resp.payload_size,
                                    "PutLogEvents", ctx->ins);
                flb_sds_destroy(error);
            }
            else {
                /* error could not be parsed, print raw response to debug */
                flb_plg_debug(ctx->ins, "Raw response: %s", c->resp.payload);
            }
        }
    }

    flb_plg_error(ctx->ins, "Failed to send log events");
    if (c) {
        flb_http_client_destroy(c);
    }
    return -1;
}


/* Release all buffers owned by a flush struct. Safe to call with NULL. */
void cw_flush_destroy(struct cw_flush *buf)
{
    if (buf) {
        flb_free(buf->tmp_buf);
        flb_free(buf->out_buf);
        flb_free(buf->events);
flb_free(buf->event_buf); + flb_free(buf); + } +} diff --git a/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.h b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.h new file mode 100644 index 000000000..99919055b --- /dev/null +++ b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.h @@ -0,0 +1,57 @@ +/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ + +/* Fluent Bit + * ========== + * Copyright (C) 2015-2022 The Fluent Bit Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLB_OUT_CLOUDWATCH_API +#define FLB_OUT_CLOUDWATCH_API + +/* + * The CloudWatch API documents that the maximum payload is 1,048,576 bytes + * For reasons that are under investigation, using that number in this plugin + * leads to API errors. No issues have been seen setting it to 1,000,000 bytes. 
+ */ +#define PUT_LOG_EVENTS_PAYLOAD_SIZE 1048576 +#define MAX_EVENTS_PER_PUT 10000 + +/* number of characters needed to 'start' a PutLogEvents payload */ +#define PUT_LOG_EVENTS_HEADER_LEN 72 +/* number of characters needed per event in a PutLogEvents payload */ +#define PUT_LOG_EVENTS_PER_EVENT_LEN 42 +/* number of characters needed to 'end' a PutLogEvents payload */ +#define PUT_LOG_EVENTS_FOOTER_LEN 4 + +/* 256KiB minus 26 bytes for the event */ +#define MAX_EVENT_LEN 262118 + +#include "cloudwatch_logs.h" + +void cw_flush_destroy(struct cw_flush *buf); + +int process_and_send(struct flb_cloudwatch *ctx, const char *input_plugin, + struct cw_flush *buf, flb_sds_t tag, + const char *data, size_t bytes); +int create_log_stream(struct flb_cloudwatch *ctx, struct log_stream *stream, int can_retry); +struct log_stream *get_log_stream(struct flb_cloudwatch *ctx, flb_sds_t tag, + const msgpack_object map); +int put_log_events(struct flb_cloudwatch *ctx, struct cw_flush *buf, + struct log_stream *stream, + size_t payload_size); +int create_log_group(struct flb_cloudwatch *ctx, struct log_stream *stream); +int compare_events(const void *a_arg, const void *b_arg); + +#endif diff --git a/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.c b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.c new file mode 100644 index 000000000..f6aef2240 --- /dev/null +++ b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.c @@ -0,0 +1,670 @@ +/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ + +/* Fluent Bit + * ========== + * Copyright (C) 2015-2022 The Fluent Bit Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fluent-bit/flb_compat.h>
#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_output.h>
#include <fluent-bit/flb_utils.h>
#include <fluent-bit/flb_slist.h>
#include <fluent-bit/flb_time.h>
#include <fluent-bit/flb_pack.h>
#include <fluent-bit/flb_config_map.h>
#include <fluent-bit/flb_output_plugin.h>

#include <fluent-bit/flb_sds.h>
#include <fluent-bit/flb_aws_credentials.h>
#include <fluent-bit/flb_aws_util.h>
#include <fluent-bit/flb_mem.h>
#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_http_client.h>
#include <fluent-bit/flb_utils.h>

#include <monkey/mk_core.h>
#include <msgpack.h>
#include <string.h>
#include <stdio.h>

#include "cloudwatch_logs.h"
#include "cloudwatch_api.h"

/* Content-Type sent on every CloudWatch Logs API request */
static struct flb_aws_header content_type_header = {
    .key = "Content-Type",
    .key_len = 12,
    .val = "application/x-amz-json-1.1",
    .val_len = 26,
};

/*
 * Plugin init callback: parse configuration, build the TLS contexts,
 * credential providers, the CloudWatch Logs HTTP client, and the reusable
 * flush buffers. Returns 0 on success, -1 on failure (all partially
 * constructed state is released via flb_cloudwatch_ctx_destroy()).
 */
static int cb_cloudwatch_init(struct flb_output_instance *ins,
                              struct flb_config *config, void *data)
{
    const char *tmp;
    char *session_name = NULL;
    struct flb_cloudwatch *ctx = NULL;
    struct cw_flush *buf = NULL;
    int ret;
    flb_sds_t tmp_sds = NULL;
    (void) config;
    (void) data;

    ctx = flb_calloc(1, sizeof(struct flb_cloudwatch));
    if (!ctx) {
        flb_errno();
        return -1;
    }

    mk_list_init(&ctx->streams);

    ctx->ins = ins;

    /* Populate context with config map defaults and incoming properties */
    ret = flb_output_config_map_set(ins, (void *) ctx);
    if (ret == -1) {
        flb_plg_error(ctx->ins, "configuration error");
        goto error;
    }

    tmp = flb_output_get_property("log_group_name", ins);
    if (tmp) {
        ctx->log_group = tmp;
        ctx->group_name = flb_sds_create(tmp);
        if (!ctx->group_name) {
            flb_plg_error(ctx->ins, "Could not create log group context property");
            goto error;
        }
    } else {
        flb_plg_error(ctx->ins, "'log_group_name' is a required field");
        goto error;
    }

    tmp = flb_output_get_property("log_stream_name", ins);
    if (tmp) {
        ctx->log_stream_name = tmp;
        ctx->stream_name = flb_sds_create(tmp);
        if (!ctx->stream_name) {
            /* NOTE(review): message says "log group" but this is the stream name */
            flb_plg_error(ctx->ins, "Could not create log group context property");
            goto error;
        }
    }

    tmp = flb_output_get_property("log_stream_prefix", ins);
    if (tmp) {
        ctx->log_stream_prefix = tmp;
    }

    /* exactly one of log_stream_name / log_stream_prefix must be set */
    if (!ctx->log_stream_name && !ctx->log_stream_prefix) {
        flb_plg_error(ctx->ins, "Either 'log_stream_name' or 'log_stream_prefix'"
                      " is required");
        goto error;
    }

    if (ctx->log_stream_name && ctx->log_stream_prefix) {
        flb_plg_error(ctx->ins, "Either 'log_stream_name' or 'log_stream_prefix'"
                      " is required");
        goto error;
    }

    tmp = flb_output_get_property("log_group_template", ins);
    if (tmp) {
        ctx->ra_group = flb_ra_create((char *) tmp, FLB_FALSE);
        if (ctx->ra_group == NULL) {
            flb_plg_error(ctx->ins, "Could not parse `log_group_template`");
            goto error;
        }
    }

    tmp = flb_output_get_property("log_stream_template", ins);
    if (tmp) {
        ctx->ra_stream = flb_ra_create((char *) tmp, FLB_FALSE);
        if (ctx->ra_stream == NULL) {
            flb_plg_error(ctx->ins, "Could not parse `log_stream_template`");
            goto error;
        }
    }

    tmp = flb_output_get_property("log_format", ins);
    if (tmp) {
        ctx->log_format = tmp;
    }

    tmp = flb_output_get_property("endpoint", ins);
    if (tmp) {
        ctx->custom_endpoint = FLB_TRUE;
        /* accept endpoints given with or without the https:// scheme */
        ctx->endpoint = removeProtocol((char *) tmp, "https://");
    }
    else {
        ctx->custom_endpoint = FLB_FALSE;
    }

    tmp = flb_output_get_property("log_key", ins);
    if (tmp) {
        ctx->log_key = tmp;
    }

    tmp = flb_output_get_property("extra_user_agent", ins);
    if (tmp) {
        ctx->extra_user_agent = tmp;
    }

    tmp = flb_output_get_property("region", ins);
    if (tmp) {
        ctx->region = tmp;
    } else {
        flb_plg_error(ctx->ins, "'region' is a required field");
        goto error;
    }

    tmp = flb_output_get_property("metric_namespace", ins);
    if (tmp)
    {
        flb_plg_info(ctx->ins, "Metric Namespace=%s", tmp);
        ctx->metric_namespace = flb_sds_create(tmp);
    }

    tmp = flb_output_get_property("metric_dimensions", ins);
    if (tmp)
    {
        flb_plg_info(ctx->ins, "Metric Dimensions=%s", tmp);
        ctx->metric_dimensions = flb_utils_split(tmp, ';', 256);
    }

    ctx->create_group = FLB_FALSE;
    tmp = flb_output_get_property("auto_create_group", ins);
    if (tmp) {
        ctx->create_group = flb_utils_bool(tmp);
    }

    ctx->retry_requests = FLB_TRUE;
    tmp = flb_output_get_property("auto_retry_requests", ins);
    /* native plugins use On/Off as bool, the old Go plugin used true/false */
    if (tmp && (strcasecmp(tmp, "Off") == 0 || strcasecmp(tmp, "false") == 0)) {
        ctx->retry_requests = FLB_FALSE;
    }

    ctx->log_retention_days = 0;
    tmp = flb_output_get_property("log_retention_days", ins);
    if (tmp) {
        ctx->log_retention_days = atoi(tmp);
    }

    tmp = flb_output_get_property("role_arn", ins);
    if (tmp) {
        ctx->role_arn = tmp;
    }

    tmp = flb_output_get_property("sts_endpoint", ins);
    if (tmp) {
        ctx->sts_endpoint = (char *) tmp;
    }

    /* one tls instance for provider, one for cw client */
    ctx->cred_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
                                   FLB_TRUE,
                                   ins->tls_debug,
                                   ins->tls_vhost,
                                   ins->tls_ca_path,
                                   ins->tls_ca_file,
                                   ins->tls_crt_file,
                                   ins->tls_key_file,
                                   ins->tls_key_passwd);

    if (!ctx->cred_tls) {
        flb_plg_error(ctx->ins, "Failed to create tls context");
        goto error;
    }

    ctx->client_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
                                     ins->tls_verify,
                                     ins->tls_debug,
                                     ins->tls_vhost,
                                     ins->tls_ca_path,
                                     ins->tls_ca_file,
                                     ins->tls_crt_file,
                                     ins->tls_key_file,
                                     ins->tls_key_passwd);
    if (!ctx->client_tls) {
        flb_plg_error(ctx->ins, "Failed to create tls context");
        goto error;
    }

    ctx->aws_provider = flb_standard_chain_provider_create(config,
                                                           ctx->cred_tls,
                                                           (char *) ctx->region,
                                                           (char *) ctx->sts_endpoint,
                                                           NULL,
                                                           flb_aws_client_generator(),
                                                           ctx->profile);
    if (!ctx->aws_provider) {
        flb_plg_error(ctx->ins, "Failed to create AWS Credential Provider");
        goto error;
    }

    if(ctx->role_arn) {
        /* set up sts assume role provider */
        session_name = flb_sts_session_name();
        if (!session_name) {
            flb_plg_error(ctx->ins,
                          "Failed to generate random STS session name");
            goto error;
        }

        /* STS provider needs yet another separate TLS instance */
        ctx->sts_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
                                      FLB_TRUE,
                                      ins->tls_debug,
                                      ins->tls_vhost,
                                      ins->tls_ca_path,
                                      ins->tls_ca_file,
                                      ins->tls_crt_file,
                                      ins->tls_key_file,
                                      ins->tls_key_passwd);
        if (!ctx->sts_tls) {
            flb_errno();
            goto error;
        }

        /* keep a handle on the base provider so it can be freed on exit */
        ctx->base_aws_provider = ctx->aws_provider;

        ctx->aws_provider = flb_sts_provider_create(config,
                                                    ctx->sts_tls,
                                                    ctx->base_aws_provider,
                                                    (char *) ctx->external_id,
                                                    (char *) ctx->role_arn,
                                                    session_name,
                                                    (char *) ctx->region,
                                                    (char *) ctx->sts_endpoint,
                                                    NULL,
                                                    flb_aws_client_generator());
        if (!ctx->aws_provider) {
            flb_plg_error(ctx->ins,
                          "Failed to create AWS STS Credential Provider");
            goto error;
        }
        /* session name can freed after provider is created */
        flb_free(session_name);
        session_name = NULL;
    }

    /* initialize credentials and set to sync mode */
    ctx->aws_provider->provider_vtable->sync(ctx->aws_provider);
    ctx->aws_provider->provider_vtable->init(ctx->aws_provider);
    ctx->aws_provider->provider_vtable->upstream_set(ctx->aws_provider, ctx->ins);

    if (ctx->endpoint == NULL) {
        /* no custom endpoint: derive the regional CloudWatch Logs endpoint */
        ctx->endpoint = flb_aws_endpoint("logs", (char *) ctx->region);
        if (!ctx->endpoint) {
            goto error;
        }
    }

    struct flb_aws_client_generator *generator = flb_aws_client_generator();
    ctx->cw_client = generator->create();
    if (!ctx->cw_client) {
        goto error;
    }
    ctx->cw_client->name = "cw_client";
    ctx->cw_client->has_auth = FLB_TRUE;
    ctx->cw_client->provider = ctx->aws_provider;
    ctx->cw_client->region = (char *) ctx->region;
    ctx->cw_client->service = "logs";
    ctx->cw_client->port = (ins->host.port != 0) ? ins->host.port : 443;
    ctx->cw_client->flags = (ins->use_tls) ? FLB_IO_TLS : FLB_IO_TCP;
    ctx->cw_client->proxy = NULL;
    ctx->cw_client->static_headers = &content_type_header;
    ctx->cw_client->static_headers_len = 1;
    tmp_sds = flb_sds_create(ctx->extra_user_agent);
    if (!tmp_sds) {
        flb_errno();
        goto error;
    }
    ctx->cw_client->extra_user_agent = tmp_sds;
    ctx->cw_client->retry_requests = ctx->retry_requests;

    struct flb_upstream *upstream = flb_upstream_create(config, ctx->endpoint,
                                                        ctx->cw_client->port,
                                                        ctx->cw_client->flags,
                                                        ctx->client_tls);
    if (!upstream) {
        flb_plg_error(ctx->ins, "Connection initialization error");
        goto error;
    }

    ctx->cw_client->upstream = upstream;
    flb_output_upstream_set(upstream, ctx->ins);
    ctx->cw_client->host = ctx->endpoint;

    /* alloc the payload/processing buffer */
    buf = flb_calloc(1, sizeof(struct cw_flush));
    if (!buf) {
        flb_errno();
        goto error;
    }

    buf->out_buf = flb_malloc(PUT_LOG_EVENTS_PAYLOAD_SIZE);
    if (!buf->out_buf) {
        flb_errno();
        cw_flush_destroy(buf);
        goto error;
    }
    buf->out_buf_size = PUT_LOG_EVENTS_PAYLOAD_SIZE;

    buf->tmp_buf = flb_malloc(sizeof(char) * PUT_LOG_EVENTS_PAYLOAD_SIZE);
    if (!buf->tmp_buf) {
        flb_errno();
        cw_flush_destroy(buf);
        goto error;
    }
    buf->tmp_buf_size = PUT_LOG_EVENTS_PAYLOAD_SIZE;

    buf->events = flb_malloc(sizeof(struct cw_event) * MAX_EVENTS_PER_PUT);
    if (!buf->events) {
        flb_errno();
        cw_flush_destroy(buf);
        goto error;
    }
    buf->events_capacity = MAX_EVENTS_PER_PUT;

    ctx->buf = buf;


    /* Export context */
    flb_output_set_context(ins, ctx);

    return 0;

error:
    flb_free(session_name);
    flb_plg_error(ctx->ins, "Initialization failed");
    /* releases everything allocated so far, including ctx itself */
    flb_cloudwatch_ctx_destroy(ctx);
    return -1;
}

/*
 * Flush callback: serialize and send one event chunk to CloudWatch.
 * Issues FLB_RETRY on failure so the engine re-schedules the chunk.
 */
static void cb_cloudwatch_flush(struct flb_event_chunk *event_chunk,
                                struct flb_output_flush *out_flush,
                                struct flb_input_instance *i_ins,
                                void *out_context,
                                struct flb_config *config)
{
    struct flb_cloudwatch *ctx = out_context;
    int event_count;
    /* NOTE(review): stale cast — i_ins IS used below (i_ins->p->name) */
    (void) i_ins;
    (void) config;

    event_count = process_and_send(ctx, i_ins->p->name, ctx->buf, event_chunk->tag,
                                   event_chunk->data, event_chunk->size);
    if (event_count < 0) {
        flb_plg_error(ctx->ins, "Failed to send events");
        FLB_OUTPUT_RETURN(FLB_RETRY);
    }

    // TODO: this msg is inaccurate if events are skipped
    flb_plg_debug(ctx->ins, "Sent %d events to CloudWatch", event_count);

    FLB_OUTPUT_RETURN(FLB_OK);
}

/*
 * Release every resource owned by the plugin context: credential providers,
 * TLS contexts, the HTTP client, buffers, name strings, record accessors
 * and all cached log stream entries. Safe to call with a partially
 * initialized context (used on the init error path) or NULL.
 */
void flb_cloudwatch_ctx_destroy(struct flb_cloudwatch *ctx)
{
    struct log_stream *stream;
    struct mk_list *tmp;
    struct mk_list *head;

    if (ctx != NULL) {
        if (ctx->base_aws_provider) {
            flb_aws_provider_destroy(ctx->base_aws_provider);
        }

        if (ctx->buf) {
            cw_flush_destroy(ctx->buf);
        }

        if (ctx->aws_provider) {
            flb_aws_provider_destroy(ctx->aws_provider);
        }

        if (ctx->cred_tls) {
            flb_tls_destroy(ctx->cred_tls);
        }

        if (ctx->sts_tls) {
            flb_tls_destroy(ctx->sts_tls);
        }

        if (ctx->client_tls) {
            flb_tls_destroy(ctx->client_tls);
        }

        if (ctx->cw_client) {
            flb_aws_client_destroy(ctx->cw_client);
        }

        /* custom endpoints point at config-owned memory; only free derived ones */
        if (ctx->custom_endpoint == FLB_FALSE) {
            flb_free(ctx->endpoint);
        }

        if (ctx->ra_group) {
            flb_ra_destroy(ctx->ra_group);
        }

        if (ctx->ra_stream) {
            flb_ra_destroy(ctx->ra_stream);
        }

        if (ctx->group_name) {
            flb_sds_destroy(ctx->group_name);
        }

        if (ctx->stream_name) {
            flb_sds_destroy(ctx->stream_name);
        }

        /* drain the cached stream list */
        mk_list_foreach_safe(head, tmp, &ctx->streams) {
            stream = mk_list_entry(head, struct log_stream, _head);
            mk_list_del(&stream->_head);
            log_stream_destroy(stream);
        }
        flb_free(ctx);
    }
}

/* Exit callback: tear down the whole plugin context. */
static int cb_cloudwatch_exit(void *data, struct flb_config *config)
{
    struct flb_cloudwatch *ctx = data;

    flb_cloudwatch_ctx_destroy(ctx);
    return 0;
}

/* Free one cached log stream entry (name, group, sequence token). */
void log_stream_destroy(struct log_stream *stream)
{
    if (stream) {
        if (stream->name) {
            flb_sds_destroy(stream->name);
        }
        if (stream->sequence_token) {
            flb_sds_destroy(stream->sequence_token);
        }
        if (stream->group) {
            flb_sds_destroy(stream->group);
        }
        flb_free(stream);
    }
}

/* Configuration properties map */
static struct flb_config_map config_map[] = {
    {
     FLB_CONFIG_MAP_STR, "region", NULL,
     0, FLB_FALSE, 0,
     "The AWS region to send logs to"
    },

    {
     FLB_CONFIG_MAP_STR, "log_group_name", NULL,
     0, FLB_FALSE, 0,
     "CloudWatch Log Group Name"
    },

    {
     FLB_CONFIG_MAP_STR, "log_stream_name", NULL,
     0, FLB_FALSE, 0,
     "CloudWatch Log Stream Name; not compatible with `log_stream_prefix`"
    },

    {
     FLB_CONFIG_MAP_STR, "log_stream_prefix", NULL,
     0, FLB_FALSE, 0,
     "Prefix for CloudWatch Log Stream Name; the tag is appended to the prefix"
     " to form the stream name"
    },

    {
     FLB_CONFIG_MAP_STR, "log_group_template", NULL,
     0, FLB_FALSE, 0,
     "Template for CW Log Group name using record accessor syntax. "
     "Plugin falls back to the log_group_name configured if needed."
    },

    {
     FLB_CONFIG_MAP_STR, "log_stream_template", NULL,
     0, FLB_FALSE, 0,
     "Template for CW Log Stream name using record accessor syntax. "
     "Plugin falls back to the log_stream_name or log_stream_prefix configured if needed."
    },

    {
     FLB_CONFIG_MAP_STR, "log_key", NULL,
     0, FLB_FALSE, 0,
     "By default, the whole log record will be sent to CloudWatch. "
     "If you specify a key name with this option, then only the value of "
     "that key will be sent to CloudWatch. For example, if you are using "
     "the Fluentd Docker log driver, you can specify log_key log and only "
     "the log message will be sent to CloudWatch."
+ }, + + { + FLB_CONFIG_MAP_STR, "extra_user_agent", NULL, + 0, FLB_FALSE, 0, + "This option appends a string to the default user agent. " + "AWS asks that you not manually set this field yourself, " + "it is reserved for use in our vended configurations, " + "for example, EKS Container Insights." + }, + + { + FLB_CONFIG_MAP_STR, "log_format", NULL, + 0, FLB_FALSE, 0, + "An optional parameter that can be used to tell CloudWatch the format " + "of the data. A value of json/emf enables CloudWatch to extract custom " + "metrics embedded in a JSON payload." + }, + + { + FLB_CONFIG_MAP_STR, "role_arn", NULL, + 0, FLB_FALSE, 0, + "ARN of an IAM role to assume (ex. for cross account access)." + }, + + { + FLB_CONFIG_MAP_BOOL, "auto_create_group", "false", + 0, FLB_FALSE, 0, + "Automatically create the log group (log streams will always automatically" + " be created)" + }, + + { + FLB_CONFIG_MAP_BOOL, "auto_retry_requests", "true", + 0, FLB_FALSE, 0, + "Immediately retry failed requests to AWS services once. This option " + "does not affect the normal Fluent Bit retry mechanism with backoff. " + "Instead, it enables an immediate retry with no delay for networking " + "errors, which may help improve throughput when there are transient/random " + "networking issues." + }, + + { + FLB_CONFIG_MAP_INT, "log_retention_days", "0", + 0, FLB_FALSE, 0, + "If set to a number greater than zero, and newly create log group's " + "retention policy is set to this many days. 
" + "Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]" + }, + + { + FLB_CONFIG_MAP_STR, "endpoint", NULL, + 0, FLB_FALSE, 0, + "Specify a custom endpoint for the CloudWatch Logs API" + }, + + { + FLB_CONFIG_MAP_STR, "sts_endpoint", NULL, + 0, FLB_FALSE, 0, + "Specify a custom endpoint for the STS API, can be used with the role_arn parameter" + }, + + { + FLB_CONFIG_MAP_STR, "external_id", NULL, + 0, FLB_TRUE, offsetof(struct flb_cloudwatch, external_id), + "Specify an external ID for the STS API, can be used with the role_arn parameter if your role " + "requires an external ID." + }, + + { + FLB_CONFIG_MAP_STR, "metric_namespace", NULL, + 0, FLB_FALSE, 0, + "Metric namespace for CloudWatch EMF logs" + }, + + { + FLB_CONFIG_MAP_STR, "metric_dimensions", NULL, + 0, FLB_FALSE, 0, + "Metric dimensions is a list of lists. If you have only one list of " + "dimensions, put the values as a comma seperated string. If you want to put " + "list of lists, use the list as semicolon seperated strings. If your value " + "is 'd1,d2;d3', we will consider it as [[d1, d2],[d3]]." + }, + + { + FLB_CONFIG_MAP_STR, "profile", NULL, + 0, FLB_TRUE, offsetof(struct flb_cloudwatch, profile), + "AWS Profile name. AWS Profiles can be configured with AWS CLI and are usually stored in " + "$HOME/.aws/ directory." 
+ }, + + /* EOF */ + {0} +}; + +/* Plugin registration */ +struct flb_output_plugin out_cloudwatch_logs_plugin = { + .name = "cloudwatch_logs", + .description = "Send logs to Amazon CloudWatch", + .cb_init = cb_cloudwatch_init, + .cb_flush = cb_cloudwatch_flush, + .cb_exit = cb_cloudwatch_exit, + + /* + * Allow cloudwatch to use async network stack synchronously by opting into + * FLB_OUTPUT_SYNCHRONOUS synchronous task scheduler + */ + .flags = FLB_OUTPUT_SYNCHRONOUS, + .workers = 1, + + /* Configuration */ + .config_map = config_map, +}; diff --git a/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.h b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.h new file mode 100644 index 000000000..7fe8bf0b7 --- /dev/null +++ b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.h @@ -0,0 +1,158 @@ +/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ + +/* Fluent Bit + * ========== + * Copyright (C) 2015-2022 The Fluent Bit Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */

#ifndef FLB_OUT_CLOUDWATCH_LOGS_H
#define FLB_OUT_CLOUDWATCH_LOGS_H

#include <fluent-bit/flb_info.h>
#include <fluent-bit/flb_sds.h>
#include <fluent-bit/flb_aws_credentials.h>
#include <fluent-bit/flb_http_client.h>
#include <fluent-bit/flb_aws_util.h>
#include <fluent-bit/flb_signv4.h>

#include <fluent-bit/flb_record_accessor.h>
#include <fluent-bit/record_accessor/flb_ra_parser.h>

/* buffers used for each flush */
struct cw_flush {
    /* temporary buffer for storing the serialized event messages */
    char *tmp_buf;
    size_t tmp_buf_size;
    /* current index of tmp_buf */
    size_t tmp_buf_offset;

    /* projected final size of the payload for this flush */
    size_t data_size;

    /* log events- each of these has a pointer to their message in tmp_buf */
    struct cw_event *events;
    int events_capacity;
    /* current event */
    int event_index;

    /* the payload of the API request */
    char *out_buf;
    size_t out_buf_size;

    /* buffer used to temporarily hold an event during processing */
    char *event_buf;
    size_t event_buf_size;

    /* current log stream that we are sending records too */
    struct log_stream *current_stream;
};

/* a single serialized log event destined for PutLogEvents */
struct cw_event {
    /* serialized message text; points into cw_flush->tmp_buf */
    char *json;
    size_t len;
    // TODO: re-usable in kinesis streams plugin if we make it timespec instead
    // uint64_t?
    /* event timestamp in milliseconds */
    unsigned long long timestamp;
};

/* cached state for one (group, stream) destination */
struct log_stream {
    flb_sds_t name;
    flb_sds_t group;
    /* sequence token expected by the next PutLogEvents call */
    flb_sds_t sequence_token;
    /*
     * log streams in CloudWatch do not expire; but our internal representations
     * of them are periodically cleaned up if they have been unused for too long
     */
    time_t expiration;

    /*
     * Used to track the "time span" of a single PutLogEvents payload
     * Which can not exceed 24 hours.
     */
    unsigned long long oldest_event;
    unsigned long long newest_event;

    struct mk_list _head;
};

void log_stream_destroy(struct log_stream *stream);

/* plugin instance context */
struct flb_cloudwatch {
    /*
     * TLS instances can not be re-used. So we have one for:
     * - Base cred provider (needed for EKS provider)
     * - STS Assume role provider
     * - The CloudWatch Logs client for this plugin
     */
    struct flb_tls *cred_tls;
    struct flb_tls *sts_tls;
    struct flb_tls *client_tls;
    struct flb_aws_provider *aws_provider;
    struct flb_aws_provider *base_aws_provider;
    struct flb_aws_client *cw_client;

    /* configuration options */
    const char *log_stream_name;
    const char *log_stream_prefix;
    const char *log_group;
    const char *region;
    const char *sts_endpoint;
    const char *log_format;
    const char *role_arn;
    const char *log_key;
    const char *extra_user_agent;
    const char *external_id;
    const char *profile;
    int custom_endpoint;
    /* Should the plugin create the log group */
    int create_group;

    flb_sds_t group_name;
    flb_sds_t stream_name;

    /* Should requests to AWS services be retried */
    int retry_requests;

    /* If set to a number greater than zero, and newly create log group's retention policy is set to this many days. */
    int log_retention_days;

    /* must be freed on shutdown if custom_endpoint is not set */
    char *endpoint;

    /* templates */
    struct flb_record_accessor *ra_group;
    struct flb_record_accessor *ra_stream;

    /* stores log streams we're putting to */
    struct mk_list streams;

    /* buffers for data processing and request payload */
    struct cw_flush *buf;
    /* The namespace to use for the metric */
    flb_sds_t metric_namespace;

    /* Metric dimensions is a list of lists. If you have only one list of
       dimensions, put the values as a comma seperated string. If you want to put
       list of lists, use the list as semicolon seperated strings. If your value
       is 'd1,d2;d3', we will consider it as [[d1, d2],[d3]]*/
    struct mk_list *metric_dimensions;

    /* Plugin output instance reference */
    struct flb_output_instance *ins;
};

void flb_cloudwatch_ctx_destroy(struct flb_cloudwatch *ctx);

#endif